1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "thread.h"
18
19#include <pthread.h>
20#include <signal.h>
21#include <sys/resource.h>
22#include <sys/time.h>
23
24#include <algorithm>
25#include <bitset>
26#include <cerrno>
27#include <iostream>
28#include <list>
29#include <sstream>
30
31#include "arch/context.h"
32#include "art_field-inl.h"
33#include "art_method-inl.h"
34#include "base/bit_utils.h"
35#include "base/memory_tool.h"
36#include "base/mutex.h"
37#include "base/timing_logger.h"
38#include "base/to_str.h"
39#include "base/systrace.h"
40#include "class_linker-inl.h"
41#include "debugger.h"
42#include "dex_file-inl.h"
43#include "entrypoints/entrypoint_utils.h"
44#include "entrypoints/quick/quick_alloc_entrypoints.h"
45#include "gc/accounting/card_table-inl.h"
46#include "gc/accounting/heap_bitmap-inl.h"
47#include "gc/allocator/rosalloc.h"
48#include "gc/heap.h"
49#include "gc/space/space-inl.h"
50#include "handle_scope-inl.h"
51#include "indirect_reference_table-inl.h"
52#include "jni_internal.h"
53#include "mirror/class_loader.h"
54#include "mirror/class-inl.h"
55#include "mirror/object_array-inl.h"
56#include "mirror/stack_trace_element.h"
57#include "monitor.h"
58#include "oat_quick_method_header.h"
59#include "object_lock.h"
60#include "quick_exception_handler.h"
61#include "quick/quick_method_frame_info.h"
62#include "reflection.h"
63#include "runtime.h"
64#include "scoped_thread_state_change.h"
65#include "ScopedLocalRef.h"
66#include "ScopedUtfChars.h"
67#include "stack.h"
68#include "stack_map.h"
69#include "thread_list.h"
70#include "thread-inl.h"
71#include "utils.h"
72#include "verifier/method_verifier.h"
73#include "verify_object-inl.h"
74#include "well_known_classes.h"
75#include "interpreter/interpreter.h"
76
77#if ART_USE_FUTEXES
78#include "linux/futex.h"
79#include "sys/syscall.h"
80#ifndef SYS_futex
81#define SYS_futex __NR_futex
82#endif
83#endif  // ART_USE_FUTEXES
84
85namespace art {
86
87bool Thread::is_started_ = false;
88pthread_key_t Thread::pthread_key_self_;
89ConditionVariable* Thread::resume_cond_ = nullptr;
90const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
91bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
92Thread* Thread::jit_sensitive_thread_ = nullptr;
93
94static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;
95
96// For implicit overflow checks we reserve an extra piece of memory at the bottom
97// of the stack (lowest memory).  The higher portion of the memory
98// is protected against reads and the lower is available for use while
99// throwing the StackOverflow exception.
100constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB;
101
102static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
103
104void Thread::InitCardTable() {
105  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
106}
107
108static void UnimplementedEntryPoint() {
109  UNIMPLEMENTED(FATAL);
110}
111
112void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints);
113
114void Thread::InitTlsEntryPoints() {
115  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
116  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
117  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) +
118      sizeof(tlsPtr_.quick_entrypoints));
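  // Note: this relies on jni_entrypoints and quick_entrypoints being laid out contiguously in
  // tlsPtr_, so a single sweep from the first table to the end of the second covers both.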
119  for (uintptr_t* it = begin; it != end; ++it) {
120    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
121  }
122  InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints);
123}
124
125void Thread::InitStringEntryPoints() {
126  ScopedObjectAccess soa(this);
127  QuickEntryPoints* qpoints = &tlsPtr_.quick_entrypoints;
128  qpoints->pNewEmptyString = reinterpret_cast<void(*)()>(
129      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newEmptyString));
130  qpoints->pNewStringFromBytes_B = reinterpret_cast<void(*)()>(
131      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_B));
132  qpoints->pNewStringFromBytes_BI = reinterpret_cast<void(*)()>(
133      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BI));
134  qpoints->pNewStringFromBytes_BII = reinterpret_cast<void(*)()>(
135      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BII));
136  qpoints->pNewStringFromBytes_BIII = reinterpret_cast<void(*)()>(
137      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIII));
138  qpoints->pNewStringFromBytes_BIIString = reinterpret_cast<void(*)()>(
139      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIIString));
140  qpoints->pNewStringFromBytes_BString = reinterpret_cast<void(*)()>(
141      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BString));
142  qpoints->pNewStringFromBytes_BIICharset = reinterpret_cast<void(*)()>(
143      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIICharset));
144  qpoints->pNewStringFromBytes_BCharset = reinterpret_cast<void(*)()>(
145      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BCharset));
146  qpoints->pNewStringFromChars_C = reinterpret_cast<void(*)()>(
147      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_C));
148  qpoints->pNewStringFromChars_CII = reinterpret_cast<void(*)()>(
149      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_CII));
150  qpoints->pNewStringFromChars_IIC = reinterpret_cast<void(*)()>(
151      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_IIC));
152  qpoints->pNewStringFromCodePoints = reinterpret_cast<void(*)()>(
153      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromCodePoints));
154  qpoints->pNewStringFromString = reinterpret_cast<void(*)()>(
155      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromString));
156  qpoints->pNewStringFromStringBuffer = reinterpret_cast<void(*)()>(
157      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuffer));
158  qpoints->pNewStringFromStringBuilder = reinterpret_cast<void(*)()>(
159      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuilder));
160}
161
162void Thread::ResetQuickAllocEntryPointsForThread() {
163  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
164}
165
166class DeoptimizationContextRecord {
167 public:
168  DeoptimizationContextRecord(const JValue& ret_val,
169                              bool is_reference,
170                              bool from_code,
171                              mirror::Throwable* pending_exception,
172                              DeoptimizationContextRecord* link)
173      : ret_val_(ret_val),
174        is_reference_(is_reference),
175        from_code_(from_code),
176        pending_exception_(pending_exception),
177        link_(link) {}
178
179  JValue GetReturnValue() const { return ret_val_; }
180  bool IsReference() const { return is_reference_; }
181  bool GetFromCode() const { return from_code_; }
182  mirror::Throwable* GetPendingException() const { return pending_exception_; }
183  DeoptimizationContextRecord* GetLink() const { return link_; }
184  mirror::Object** GetReturnValueAsGCRoot() {
185    DCHECK(is_reference_);
186    return ret_val_.GetGCRoot();
187  }
188  mirror::Object** GetPendingExceptionAsGCRoot() {
189    return reinterpret_cast<mirror::Object**>(&pending_exception_);
190  }
191
192 private:
193  // The value returned by the method at the top of the stack before deoptimization.
194  JValue ret_val_;
195
196  // Indicates whether the returned value is a reference. If so, the GC will visit it.
197  const bool is_reference_;
198
199  // Whether the context was created from an explicit deoptimization in the code.
200  const bool from_code_;
201
202  // The exception that was pending before deoptimization (or null if there was no pending
203  // exception).
204  mirror::Throwable* pending_exception_;
205
206  // A link to the previous DeoptimizationContextRecord.
207  DeoptimizationContextRecord* const link_;
208
209  DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord);
210};
211
212class StackedShadowFrameRecord {
213 public:
214  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
215                           StackedShadowFrameType type,
216                           StackedShadowFrameRecord* link)
217      : shadow_frame_(shadow_frame),
218        type_(type),
219        link_(link) {}
220
221  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
222  StackedShadowFrameType GetType() const { return type_; }
223  StackedShadowFrameRecord* GetLink() const { return link_; }
224
225 private:
226  ShadowFrame* const shadow_frame_;
227  const StackedShadowFrameType type_;
228  StackedShadowFrameRecord* const link_;
229
230  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
231};
232
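// The deoptimization context records below form a thread-local, singly linked stack threaded
// through link_. Each PushDeoptimizationContext() must be balanced by a
// PopDeoptimizationContext(), which unlinks and deletes the head record.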
233void Thread::PushDeoptimizationContext(const JValue& return_value,
234                                       bool is_reference,
235                                       bool from_code,
236                                       mirror::Throwable* exception) {
237  DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
238      return_value,
239      is_reference,
240      from_code,
241      exception,
242      tlsPtr_.deoptimization_context_stack);
243  tlsPtr_.deoptimization_context_stack = record;
244}
245
246void Thread::PopDeoptimizationContext(JValue* result,
247                                      mirror::Throwable** exception,
248                                      bool* from_code) {
249  AssertHasDeoptimizationContext();
250  DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
251  tlsPtr_.deoptimization_context_stack = record->GetLink();
252  result->SetJ(record->GetReturnValue().GetJ());
253  *exception = record->GetPendingException();
254  *from_code = record->GetFromCode();
255  delete record;
256}
257
258void Thread::AssertHasDeoptimizationContext() {
259  CHECK(tlsPtr_.deoptimization_context_stack != nullptr)
260      << "No deoptimization context for thread " << *this;
261}
262
263void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
264  StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
265      sf, type, tlsPtr_.stacked_shadow_frame_record);
266  tlsPtr_.stacked_shadow_frame_record = record;
267}
268
269ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present) {
270  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
271  if (must_be_present) {
272    DCHECK(record != nullptr);
273    DCHECK_EQ(record->GetType(), type);
274  } else {
275    if (record == nullptr || record->GetType() != type) {
276      return nullptr;
277    }
278  }
279  tlsPtr_.stacked_shadow_frame_record = record->GetLink();
280  ShadowFrame* shadow_frame = record->GetShadowFrame();
281  delete record;
282  return shadow_frame;
283}
284
285class FrameIdToShadowFrame {
286 public:
287  static FrameIdToShadowFrame* Create(size_t frame_id,
288                                      ShadowFrame* shadow_frame,
289                                      FrameIdToShadowFrame* next,
290                                      size_t num_vregs) {
291    // Append a bool array at the end to keep track of what vregs are updated by the debugger.
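    // The record and the flag array share a single allocation: placement new constructs the
    // record at the start of the buffer, and the trailing bytes back the flexible
    // updated_vreg_flags_ array. Delete() below must therefore free the raw byte buffer.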
292    uint8_t* memory = new uint8_t[sizeof(FrameIdToShadowFrame) + sizeof(bool) * num_vregs];
293    return new (memory) FrameIdToShadowFrame(frame_id, shadow_frame, next);
294  }
295
296  static void Delete(FrameIdToShadowFrame* f) {
297    uint8_t* memory = reinterpret_cast<uint8_t*>(f);
298    delete[] memory;
299  }
300
301  size_t GetFrameId() const { return frame_id_; }
302  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
303  FrameIdToShadowFrame* GetNext() const { return next_; }
304  void SetNext(FrameIdToShadowFrame* next) { next_ = next; }
305  bool* GetUpdatedVRegFlags() {
306    return updated_vreg_flags_;
307  }
308
309 private:
310  FrameIdToShadowFrame(size_t frame_id,
311                       ShadowFrame* shadow_frame,
312                       FrameIdToShadowFrame* next)
313      : frame_id_(frame_id),
314        shadow_frame_(shadow_frame),
315        next_(next) {}
316
317  const size_t frame_id_;
318  ShadowFrame* const shadow_frame_;
319  FrameIdToShadowFrame* next_;
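  // Zero-length trailing array: the storage for the per-vreg flags is allocated by Create()
  // directly after this object (see the sizeof computation there).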
320  bool updated_vreg_flags_[0];
321
322  DISALLOW_COPY_AND_ASSIGN(FrameIdToShadowFrame);
323};
324
325static FrameIdToShadowFrame* FindFrameIdToShadowFrame(FrameIdToShadowFrame* head,
326                                                      size_t frame_id) {
327  FrameIdToShadowFrame* found = nullptr;
328  for (FrameIdToShadowFrame* record = head; record != nullptr; record = record->GetNext()) {
329    if (record->GetFrameId() == frame_id) {
330      if (kIsDebugBuild) {
331        // Sanity check we have at most one record for this frame.
332        CHECK(found == nullptr) << "Multiple records for the frame " << frame_id;
333        found = record;
334      } else {
335        return record;
336      }
337    }
338  }
339  return found;
340}
341
342ShadowFrame* Thread::FindDebuggerShadowFrame(size_t frame_id) {
343  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
344      tlsPtr_.frame_id_to_shadow_frame, frame_id);
345  if (record != nullptr) {
346    return record->GetShadowFrame();
347  }
348  return nullptr;
349}
350
351// Must only be called when FindDebuggerShadowFrame(frame_id) returns non-nullptr.
352bool* Thread::GetUpdatedVRegFlags(size_t frame_id) {
353  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
354      tlsPtr_.frame_id_to_shadow_frame, frame_id);
355  CHECK(record != nullptr);
356  return record->GetUpdatedVRegFlags();
357}
358
359ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id,
360                                                     uint32_t num_vregs,
361                                                     ArtMethod* method,
362                                                     uint32_t dex_pc) {
363  ShadowFrame* shadow_frame = FindDebuggerShadowFrame(frame_id);
364  if (shadow_frame != nullptr) {
365    return shadow_frame;
366  }
367  VLOG(deopt) << "Create pre-deopted ShadowFrame for " << PrettyMethod(method);
368  shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, nullptr, method, dex_pc);
369  FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id,
370                                                              shadow_frame,
371                                                              tlsPtr_.frame_id_to_shadow_frame,
372                                                              num_vregs);
373  for (uint32_t i = 0; i < num_vregs; i++) {
374    // Do this to clear all references for root visitors.
375    shadow_frame->SetVRegReference(i, nullptr);
376    // This flag will be changed to true if the debugger modifies the value.
377    record->GetUpdatedVRegFlags()[i] = false;
378  }
379  tlsPtr_.frame_id_to_shadow_frame = record;
380  return shadow_frame;
381}
382
383void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) {
384  FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame;
385  if (head->GetFrameId() == frame_id) {
386    tlsPtr_.frame_id_to_shadow_frame = head->GetNext();
387    FrameIdToShadowFrame::Delete(head);
388    return;
389  }
390  FrameIdToShadowFrame* prev = head;
391  for (FrameIdToShadowFrame* record = head->GetNext();
392       record != nullptr;
393       prev = record, record = record->GetNext()) {
394    if (record->GetFrameId() == frame_id) {
395      prev->SetNext(record->GetNext());
396      FrameIdToShadowFrame::Delete(record);
397      return;
398    }
399  }
400  LOG(FATAL) << "No shadow frame for frame " << frame_id;
401  UNREACHABLE();
402}
403
404void Thread::InitTid() {
405  tls32_.tid = ::art::GetTid();
406}
407
408void Thread::InitAfterFork() {
409  // One thread (us) survived the fork, but we have a new tid so we need to
410  // update the value stashed in this Thread*.
411  InitTid();
412}
413
414void* Thread::CreateCallback(void* arg) {
415  Thread* self = reinterpret_cast<Thread*>(arg);
416  Runtime* runtime = Runtime::Current();
417  if (runtime == nullptr) {
418    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
419    return nullptr;
420  }
421  {
422    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
423    //       after self->Init().
424    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
425    // Check that if we got here we cannot be shutting down (as shutdown should never have started
426    // while threads are being born).
427    CHECK(!runtime->IsShuttingDownLocked());
428    // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
429    //       a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort
430    //       the runtime in such a case. In case this ever changes, we need to make sure here to
431    //       delete the tmp_jni_env, as we own it at this point.
432    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
433    self->tlsPtr_.tmp_jni_env = nullptr;
434    Runtime::Current()->EndThreadBirth();
435  }
436  {
437    ScopedObjectAccess soa(self);
438    self->InitStringEntryPoints();
439
440    // Copy peer into self, deleting global reference when done.
441    CHECK(self->tlsPtr_.jpeer != nullptr);
442    self->tlsPtr_.opeer = soa.Decode<mirror::Object*>(self->tlsPtr_.jpeer);
443    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
444    self->tlsPtr_.jpeer = nullptr;
445    self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
446
447    ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
448    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
449    Dbg::PostThreadStart(self);
450
451    // Invoke the 'run' method of our java.lang.Thread.
452    mirror::Object* receiver = self->tlsPtr_.opeer;
453    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
454    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
455    InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);
456  }
457  // Detach and delete self.
458  Runtime::Current()->GetThreadList()->Unregister(self);
459
460  return nullptr;
461}
462
463Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
464                                  mirror::Object* thread_peer) {
465  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
466  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
467  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
468  // to stop it from going away.
469  if (kIsDebugBuild) {
470    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
471    if (result != nullptr && !result->IsSuspended()) {
472      Locks::thread_list_lock_->AssertHeld(soa.Self());
473    }
474  }
475  return result;
476}
477
478Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
479                                  jobject java_thread) {
480  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
481}
482
483static size_t FixStackSize(size_t stack_size) {
484  // A stack size of zero means "use the default".
485  if (stack_size == 0) {
486    stack_size = Runtime::Current()->GetDefaultStackSize();
487  }
488
489  // Dalvik used the bionic pthread default stack size for native threads,
490  // so include that here to support apps that expect large native stacks.
491  stack_size += 1 * MB;
492
493  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
494  if (stack_size < PTHREAD_STACK_MIN) {
495    stack_size = PTHREAD_STACK_MIN;
496  }
497
498  if (Runtime::Current()->ExplicitStackOverflowChecks()) {
499    // It's likely that callers are trying to ensure they have at least a certain amount of
500    // stack space, so we should add our reserved space on top of what they requested, rather
501    // than implicitly take it away from them.
502    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
503  } else {
504    // If we are going to use implicit stack checks, allocate space for the protected
505    // region at the bottom of the stack.
506    stack_size += Thread::kStackOverflowImplicitCheckSize +
507        GetStackOverflowReservedBytes(kRuntimeISA);
508  }
509
510  // Some systems require the stack size to be a multiple of the system page size, so round up.
511  stack_size = RoundUp(stack_size, kPageSize);
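  // Illustration (sizes vary by platform, so treat these numbers as an example only): with a
  // 1 MB request, explicit checks, 8 KB of reserved bytes and 4 KB pages, the result is
  // 1 MB + 1 MB + 8 KB, which is already page-aligned.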
512
513  return stack_size;
514}
515
516// Install a protected region in the stack.  This is used to trigger a SIGSEGV if a stack
517// overflow is detected.  It is located right below the stack_begin_.
518ATTRIBUTE_NO_SANITIZE_ADDRESS
519void Thread::InstallImplicitProtection() {
520  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
521  uint8_t* stack_himem = tlsPtr_.stack_end;
522  uint8_t* stack_top = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(&stack_himem) &
523      ~(kPageSize - 1));    // Page containing current top of stack.
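  // Rough layout, lowest addresses first (the stack itself grows toward lower addresses):
  //   [pregion, pregion + kStackOverflowProtectedSize)  the region we mprotect below
  //   [stack_begin_, stack_end_)                        reserved for stack overflow handling
  //   [stack_end_, stack_top]                           ordinary stack, currently in use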
524
525  // Try to directly protect the stack.
526  VLOG(threads) << "installing stack protected region at " << std::hex <<
527        static_cast<void*>(pregion) << " to " <<
528        static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
529  if (ProtectStack(/* fatal_on_error */ false)) {
530    // Tell the kernel that we won't be needing these pages any more.
531    // NB. madvise will probably write zeroes into the memory (on linux it does).
532    uint32_t unwanted_size = stack_top - pregion - kPageSize;
533    madvise(pregion, unwanted_size, MADV_DONTNEED);
534    return;
535  }
536
537  // There is a little complexity here that deserves a special mention.  On some
538  // architectures, the stack is created using a VM_GROWSDOWN flag
539  // to prevent memory being allocated when it's not needed.  This flag makes the
540  // kernel only allocate memory for the stack by growing down in memory.  Because we
541  // want to put an mprotected region far away from that at the stack top, we need
542  // to make sure the pages for the stack are mapped in before we call mprotect.
543  //
544  // The failed mprotect in UnprotectStack is an indication of a thread with VM_GROWSDOWN
545  // with a non-mapped stack (usually only the main thread).
546  //
547  // We map in the stack by reading every page from the stack bottom (highest address)
548  // to the stack top. (We then madvise this away.) This must be done by reading from the
549  // current stack pointer downwards. Any access more than a page below the current SP
550  // might cause a segv.
551  // TODO: This comment may be out of date. It seems possible to speed this up. As
552  //       this is normally done once in the zygote on startup, ignore for now.
553  //
  // AddressSanitizer does not like the part of this function that reads every stack page.
  // It looks a lot like an out-of-bounds access.

  // (Defensively) first remove the protection on the protected region, as we will want to read
  // and write it. Ignore errors.
559  UnprotectStack();
560
561  VLOG(threads) << "Need to map in stack for thread at " << std::hex <<
562      static_cast<void*>(pregion);
563
564  // Read every page from the high address to the low.
565  volatile uint8_t dont_optimize_this;
566  UNUSED(dont_optimize_this);
567  for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
568    dont_optimize_this = *p;
569  }
570
571  VLOG(threads) << "(again) installing stack protected region at " << std::hex <<
572      static_cast<void*>(pregion) << " to " <<
573      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
574
575  // Protect the bottom of the stack to prevent read/write to it.
576  ProtectStack(/* fatal_on_error */ true);
577
578  // Tell the kernel that we won't be needing these pages any more.
579  // NB. madvise will probably write zeroes into the memory (on linux it does).
580  uint32_t unwanted_size = stack_top - pregion - kPageSize;
581  madvise(pregion, unwanted_size, MADV_DONTNEED);
582}
583
584void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
585  CHECK(java_peer != nullptr);
586  Thread* self = static_cast<JNIEnvExt*>(env)->self;
587
588  if (VLOG_IS_ON(threads)) {
589    ScopedObjectAccess soa(env);
590
591    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
592    mirror::String* java_name = reinterpret_cast<mirror::String*>(f->GetObject(
593        soa.Decode<mirror::Object*>(java_peer)));
594    std::string thread_name;
595    if (java_name != nullptr) {
596      thread_name = java_name->ToModifiedUtf8();
597    } else {
598      thread_name = "(Unnamed)";
599    }
600
601    VLOG(threads) << "Creating native thread for " << thread_name;
602    self->Dump(LOG(INFO));
603  }
604
605  Runtime* runtime = Runtime::Current();
606
607  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
608  bool thread_start_during_shutdown = false;
609  {
610    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
611    if (runtime->IsShuttingDownLocked()) {
612      thread_start_during_shutdown = true;
613    } else {
614      runtime->StartThreadBirth();
615    }
616  }
617  if (thread_start_during_shutdown) {
618    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
619    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
620    return;
621  }
622
623  Thread* child_thread = new Thread(is_daemon);
624  // Use global JNI ref to hold peer live while child thread starts.
625  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
626  stack_size = FixStackSize(stack_size);
627
628  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
629  // assign it.
630  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
631                    reinterpret_cast<jlong>(child_thread));
632
633  // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
634  // do not have a good way to report this on the child's side.
635  std::unique_ptr<JNIEnvExt> child_jni_env_ext(
636      JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM()));
637
638  int pthread_create_result = 0;
639  if (child_jni_env_ext.get() != nullptr) {
640    pthread_t new_pthread;
641    pthread_attr_t attr;
642    child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
643    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
644    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
645                       "PTHREAD_CREATE_DETACHED");
646    CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
647    pthread_create_result = pthread_create(&new_pthread,
648                                           &attr,
649                                           Thread::CreateCallback,
650                                           child_thread);
651    CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
652
653    if (pthread_create_result == 0) {
654      // pthread_create started the new thread. The child is now responsible for managing the
655      // JNIEnvExt we created.
656      // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
657      //       between the threads.
658      child_jni_env_ext.release();
659      return;
660    }
661  }
662
663  // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
664  {
665    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
666    runtime->EndThreadBirth();
667  }
668  // Manually delete the global reference since Thread::Init will not have been run.
669  env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
670  child_thread->tlsPtr_.jpeer = nullptr;
671  delete child_thread;
672  child_thread = nullptr;
673  // TODO: remove from thread group?
674  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
675  {
676    std::string msg(child_jni_env_ext.get() == nullptr ?
677        "Could not allocate JNI Env" :
678        StringPrintf("pthread_create (%s stack) failed: %s",
679                                 PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
680    ScopedObjectAccess soa(env);
681    soa.Self()->ThrowOutOfMemoryError(msg.c_str());
682  }
683}
684
685bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
686  // This function does all the initialization that must be run by the native thread it applies to.
687  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
688  // we can handshake with the corresponding native thread when it's ready.) Check this native
689  // thread hasn't been through here already...
690  CHECK(Thread::Current() == nullptr);
691
  // Set pthread_self_ ahead of pthread_setspecific so that Thread::Current() works; this
  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
694  tlsPtr_.pthread_self = pthread_self();
695  CHECK(is_started_);
696
697  SetUpAlternateSignalStack();
698  if (!InitStackHwm()) {
699    return false;
700  }
701  InitCpu();
702  InitTlsEntryPoints();
703  RemoveSuspendTrigger();
704  InitCardTable();
705  InitTid();
706  interpreter::InitInterpreterTls(this);
707
708#ifdef __ANDROID__
709  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
710#else
711  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
712#endif
713  DCHECK_EQ(Thread::Current(), this);
714
715  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
716
717  if (jni_env_ext != nullptr) {
718    DCHECK_EQ(jni_env_ext->vm, java_vm);
719    DCHECK_EQ(jni_env_ext->self, this);
720    tlsPtr_.jni_env = jni_env_ext;
721  } else {
722    tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm);
723    if (tlsPtr_.jni_env == nullptr) {
724      return false;
725    }
726  }
727
728  thread_list->Register(this);
729  return true;
730}
731
732Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
733                       bool create_peer) {
734  Runtime* runtime = Runtime::Current();
735  if (runtime == nullptr) {
736    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
737    return nullptr;
738  }
739  Thread* self;
740  {
741    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
742    if (runtime->IsShuttingDownLocked()) {
743      LOG(WARNING) << "Thread attaching while runtime is shutting down: " << thread_name;
744      return nullptr;
745    } else {
746      Runtime::Current()->StartThreadBirth();
747      self = new Thread(as_daemon);
748      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
749      Runtime::Current()->EndThreadBirth();
750      if (!init_success) {
751        delete self;
752        return nullptr;
753      }
754    }
755  }
756
757  self->InitStringEntryPoints();
758
759  CHECK_NE(self->GetState(), kRunnable);
760  self->SetState(kNative);
761
762  // If we're the main thread, ClassLinker won't be created until after we're attached,
763  // so that thread needs a two-stage attach. Regular threads don't need this hack.
764  // In the compiler, all threads need this hack, because no-one's going to be getting
765  // a native peer!
766  if (create_peer) {
767    self->CreatePeer(thread_name, as_daemon, thread_group);
768    if (self->IsExceptionPending()) {
769      // We cannot keep the exception around, as we're deleting self. Try to be helpful and log it.
770      {
771        ScopedObjectAccess soa(self);
772        LOG(ERROR) << "Exception creating thread peer:";
773        LOG(ERROR) << self->GetException()->Dump();
774        self->ClearException();
775      }
776      runtime->GetThreadList()->Unregister(self);
777      // Unregister deletes self, no need to do this here.
778      return nullptr;
779    }
780  } else {
781    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
782    if (thread_name != nullptr) {
783      self->tlsPtr_.name->assign(thread_name);
784      ::art::SetThreadName(thread_name);
785    } else if (self->GetJniEnv()->check_jni) {
786      LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
787    }
788  }
789
790  if (VLOG_IS_ON(threads)) {
791    if (thread_name != nullptr) {
792      VLOG(threads) << "Attaching thread " << thread_name;
793    } else {
794      VLOG(threads) << "Attaching unnamed thread.";
795    }
796    ScopedObjectAccess soa(self);
797    self->Dump(LOG(INFO));
798  }
799
800  {
801    ScopedObjectAccess soa(self);
802    Dbg::PostThreadStart(self);
803  }
804
805  return self;
806}
807
808void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
809  Runtime* runtime = Runtime::Current();
810  CHECK(runtime->IsStarted());
811  JNIEnv* env = tlsPtr_.jni_env;
812
813  if (thread_group == nullptr) {
814    thread_group = runtime->GetMainThreadGroup();
815  }
816  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  // Check for null in case of OOM (b/18297817).
818  if (name != nullptr && thread_name.get() == nullptr) {
819    CHECK(IsExceptionPending());
820    return;
821  }
822  jint thread_priority = GetNativePriority();
823  jboolean thread_is_daemon = as_daemon;
824
825  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
826  if (peer.get() == nullptr) {
827    CHECK(IsExceptionPending());
828    return;
829  }
830  {
831    ScopedObjectAccess soa(this);
832    tlsPtr_.opeer = soa.Decode<mirror::Object*>(peer.get());
833  }
834  env->CallNonvirtualVoidMethod(peer.get(),
835                                WellKnownClasses::java_lang_Thread,
836                                WellKnownClasses::java_lang_Thread_init,
837                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
838  if (IsExceptionPending()) {
839    return;
840  }
841
842  Thread* self = this;
843  DCHECK_EQ(self, Thread::Current());
844  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
845                    reinterpret_cast<jlong>(self));
846
847  ScopedObjectAccess soa(self);
848  StackHandleScope<1> hs(self);
849  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
850  if (peer_thread_name.Get() == nullptr) {
851    // The Thread constructor should have set the Thread.name to a
852    // non-null value. However, because we can run without code
853    // available (in the compiler, in tests), we manually assign the
854    // fields the constructor should have set.
855    if (runtime->IsActiveTransaction()) {
856      InitPeer<true>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
857    } else {
858      InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
859    }
860    peer_thread_name.Assign(GetThreadName(soa));
861  }
862  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
863  if (peer_thread_name.Get() != nullptr) {
864    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
865  }
866}
867
868template<bool kTransactionActive>
869void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
870                      jobject thread_name, jint thread_priority) {
871  soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
872      SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon);
873  soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
874      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_group));
875  soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
876      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_name));
877  soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
878      SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority);
879}
880
881void Thread::SetThreadName(const char* name) {
882  tlsPtr_.name->assign(name);
883  ::art::SetThreadName(name);
884  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
885}
886
887bool Thread::InitStackHwm() {
888  void* read_stack_base;
889  size_t read_stack_size;
890  size_t read_guard_size;
891  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);
892
893  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
894  tlsPtr_.stack_size = read_stack_size;
895
  // The minimum stack size we can cope with is the overflow reserved bytes (typically
  // 8K) + the protected region size (4K) + another page (4K).  Typically this will
  // be 8+4+4 = 16K.  The thread won't be able to do much with this stack; even the GC takes
  // between 8K and 12K.
900  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
901    + 4 * KB;
902  if (read_stack_size <= min_stack) {
903    // Note, as we know the stack is small, avoid operations that could use a lot of stack.
904    LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
905                                "Attempt to attach a thread with a too-small stack");
906    return false;
907  }
908
909  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
910  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
911                                read_stack_base,
912                                PrettySize(read_stack_size).c_str(),
913                                PrettySize(read_guard_size).c_str());
914
  // Set stack_end_ to the bottom of the stack, reserving space for handling stack overflows.
916
917  Runtime* runtime = Runtime::Current();
918  bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
919  ResetDefaultStackEnd();
920
921  // Install the protected region if we are doing implicit overflow checks.
922  if (implicit_stack_check) {
    // The thread might already have a protected region at the bottom.  We need
    // to install our own region, so move the limits
    // of the stack to make room for it.
926
927    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
928    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
929    tlsPtr_.stack_size -= read_guard_size;
930
931    InstallImplicitProtection();
932  }
933
934  // Sanity check.
935  int stack_variable;
936  CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
937
938  return true;
939}
940
941void Thread::ShortDump(std::ostream& os) const {
942  os << "Thread[";
943  if (GetThreadId() != 0) {
944    // If we're in kStarting, we won't have a thin lock id or tid yet.
945    os << GetThreadId()
946       << ",tid=" << GetTid() << ',';
947  }
948  os << GetState()
949     << ",Thread*=" << this
950     << ",peer=" << tlsPtr_.opeer
951     << ",\"" << (tlsPtr_.name != nullptr ? *tlsPtr_.name : "null") << "\""
952     << "]";
953}
954
955void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map) const {
956  DumpState(os);
957  DumpStack(os, dump_native_stack, backtrace_map);
958}
959
960mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
961  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
962  return (tlsPtr_.opeer != nullptr) ?
963      reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
964}
965
966void Thread::GetThreadName(std::string& name) const {
967  name.assign(*tlsPtr_.name);
968}
969
970uint64_t Thread::GetCpuMicroTime() const {
971#if defined(__linux__)
972  clockid_t cpu_clock_id;
973  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
974  timespec now;
975  clock_gettime(cpu_clock_id, &now);
976  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
977#else  // __APPLE__
978  UNIMPLEMENTED(WARNING);
979  return -1;
980#endif
981}
982
983// Attempt to rectify locks so that we dump thread list with required locks before exiting.
984static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
985  LOG(ERROR) << *thread << " suspend count already zero.";
986  Locks::thread_suspend_count_lock_->Unlock(self);
987  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
988    Locks::mutator_lock_->SharedTryLock(self);
989    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
990      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
991    }
992  }
993  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
994    Locks::thread_list_lock_->TryLock(self);
995    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
996      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
997    }
998  }
999  std::ostringstream ss;
1000  Runtime::Current()->GetThreadList()->Dump(ss);
1001  LOG(FATAL) << ss.str();
1002}
1003
1004bool Thread::ModifySuspendCount(Thread* self, int delta, AtomicInteger* suspend_barrier,
1005                                bool for_debugger) {
1006  if (kIsDebugBuild) {
1007    DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count)
1008          << delta << " " << tls32_.debug_suspend_count << " " << this;
1009    DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this;
1010    Locks::thread_suspend_count_lock_->AssertHeld(self);
1011    if (this != self && !IsSuspended()) {
1012      Locks::thread_list_lock_->AssertHeld(self);
1013    }
1014  }
1015  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
1016    UnsafeLogFatalForSuspendCount(self, this);
1017    return false;
1018  }
1019
1020  uint16_t flags = kSuspendRequest;
1021  if (delta > 0 && suspend_barrier != nullptr) {
1022    uint32_t available_barrier = kMaxSuspendBarriers;
1023    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
1024      if (tlsPtr_.active_suspend_barriers[i] == nullptr) {
1025        available_barrier = i;
1026        break;
1027      }
1028    }
1029    if (available_barrier == kMaxSuspendBarriers) {
1030      // No barrier spaces available, we can't add another.
1031      return false;
1032    }
1033    tlsPtr_.active_suspend_barriers[available_barrier] = suspend_barrier;
1034    flags |= kActiveSuspendBarrier;
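    // The target thread will see kActiveSuspendBarrier on its way to suspension and decrement
    // the barrier in PassActiveSuspendBarriers(), waking anyone waiting on it.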
1035  }
1036
1037  tls32_.suspend_count += delta;
1038  if (for_debugger) {
1039    tls32_.debug_suspend_count += delta;
1040  }
1041
1042  if (tls32_.suspend_count == 0) {
1043    AtomicClearFlag(kSuspendRequest);
1044  } else {
    // Both kSuspendRequest and kActiveSuspendBarrier may need to be set, so OR them in atomically.
1046    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flags);
1047    TriggerSuspend();
1048  }
1049  return true;
1050}
1051
1052bool Thread::PassActiveSuspendBarriers(Thread* self) {
1053  // Grab the suspend_count lock and copy the current set of
1054  // barriers. Then clear the list and the flag. The ModifySuspendCount
1055  // function requires the lock so we prevent a race between setting
1056  // the kActiveSuspendBarrier flag and clearing it.
1057  AtomicInteger* pass_barriers[kMaxSuspendBarriers];
1058  {
1059    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1060    if (!ReadFlag(kActiveSuspendBarrier)) {
      // Quick exit test: the barriers have already been claimed. This is
      // possible because there may be a race to claim them, and it doesn't matter
      // who wins.
      // All callers of this function (except SuspendAllInternal) first test the
      // kActiveSuspendBarrier flag without holding the lock. Here we double-check,
      // with the suspend_count lock held, whether the barrier has already been passed.
1068      return false;
1069    }
1070
1071    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
1072      pass_barriers[i] = tlsPtr_.active_suspend_barriers[i];
1073      tlsPtr_.active_suspend_barriers[i] = nullptr;
1074    }
1075    AtomicClearFlag(kActiveSuspendBarrier);
1076  }
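  // Outside the lock, decrement every barrier we claimed; the waiter (typically the thread
  // running SuspendAll) blocks until the count reaches zero, so wake it once we get there.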
1077
1078  uint32_t barrier_count = 0;
1079  for (uint32_t i = 0; i < kMaxSuspendBarriers; i++) {
1080    AtomicInteger* pending_threads = pass_barriers[i];
1081    if (pending_threads != nullptr) {
1082      bool done = false;
1083      do {
1084        int32_t cur_val = pending_threads->LoadRelaxed();
1085        CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val;
1086        // Reduce value by 1.
1087        done = pending_threads->CompareExchangeWeakRelaxed(cur_val, cur_val - 1);
1088#if ART_USE_FUTEXES
1089        if (done && (cur_val - 1) == 0) {  // Weak CAS may fail spuriously.
1090          futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
1091        }
1092#endif
1093      } while (!done);
1094      ++barrier_count;
1095    }
1096  }
1097  CHECK_GT(barrier_count, 0U);
1098  return true;
1099}
1100
1101void Thread::ClearSuspendBarrier(AtomicInteger* target) {
1102  CHECK(ReadFlag(kActiveSuspendBarrier));
1103  bool clear_flag = true;
1104  for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
1105    AtomicInteger* ptr = tlsPtr_.active_suspend_barriers[i];
1106    if (ptr == target) {
1107      tlsPtr_.active_suspend_barriers[i] = nullptr;
1108    } else if (ptr != nullptr) {
1109      clear_flag = false;
1110    }
1111  }
1112  if (LIKELY(clear_flag)) {
1113    AtomicClearFlag(kActiveSuspendBarrier);
1114  }
1115}
1116
1117void Thread::RunCheckpointFunction() {
1118  Closure *checkpoints[kMaxCheckpoints];
1119
1120  // Grab the suspend_count lock and copy the current set of
1121  // checkpoints.  Then clear the list and the flag.  The RequestCheckpoint
1122  // function will also grab this lock so we prevent a race between setting
1123  // the kCheckpointRequest flag and clearing it.
1124  {
1125    MutexLock mu(this, *Locks::thread_suspend_count_lock_);
1126    for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
1127      checkpoints[i] = tlsPtr_.checkpoint_functions[i];
1128      tlsPtr_.checkpoint_functions[i] = nullptr;
1129    }
1130    AtomicClearFlag(kCheckpointRequest);
1131  }
1132
1133  // Outside the lock, run all the checkpoint functions that
1134  // we collected.
1135  bool found_checkpoint = false;
1136  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
1137    if (checkpoints[i] != nullptr) {
1138      ScopedTrace trace("Run checkpoint function");
1139      checkpoints[i]->Run(this);
1140      found_checkpoint = true;
1141    }
1142  }
1143  CHECK(found_checkpoint);
1144}
1145
1146bool Thread::RequestCheckpoint(Closure* function) {
1147  union StateAndFlags old_state_and_flags;
1148  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
1149  if (old_state_and_flags.as_struct.state != kRunnable) {
1150    return false;  // Fail, thread is suspended and so can't run a checkpoint.
1151  }
1152
1153  uint32_t available_checkpoint = kMaxCheckpoints;
1154  for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
1155    if (tlsPtr_.checkpoint_functions[i] == nullptr) {
1156      available_checkpoint = i;
1157      break;
1158    }
1159  }
1160  if (available_checkpoint == kMaxCheckpoints) {
    // No checkpoint function slots are available; we can't run a checkpoint.
1162    return false;
1163  }
1164  tlsPtr_.checkpoint_functions[available_checkpoint] = function;
1165
  // Checkpoint function installed; now install the flag bit.
  // We must be runnable to request a checkpoint.
1168  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
1169  union StateAndFlags new_state_and_flags;
1170  new_state_and_flags.as_int = old_state_and_flags.as_int;
1171  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
1172  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
1173      old_state_and_flags.as_int, new_state_and_flags.as_int);
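  // The CAS only succeeds if the thread is still runnable with unchanged flags; otherwise we
  // roll back the checkpoint slot we claimed above.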
1174  if (UNLIKELY(!success)) {
1175    // The thread changed state before the checkpoint was installed.
1176    CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
1177    tlsPtr_.checkpoint_functions[available_checkpoint] = nullptr;
1178  } else {
1179    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
1180    TriggerSuspend();
1181  }
1182  return success;
1183}
1184
1185Closure* Thread::GetFlipFunction() {
1186  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
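  // Claim the flip function at most once: atomically swap it to null so that only a single
  // caller gets to run it, even if several threads race here.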
1187  Closure* func;
1188  do {
1189    func = atomic_func->LoadRelaxed();
1190    if (func == nullptr) {
1191      return nullptr;
1192    }
1193  } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
1194  DCHECK(func != nullptr);
1195  return func;
1196}
1197
1198void Thread::SetFlipFunction(Closure* function) {
1199  CHECK(function != nullptr);
1200  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
1201  atomic_func->StoreSequentiallyConsistent(function);
1202}
1203
1204void Thread::FullSuspendCheck() {
1205  ScopedTrace trace(__FUNCTION__);
1206  VLOG(threads) << this << " self-suspending";
1207  // Make thread appear suspended to other threads, release mutator_lock_.
1208  tls32_.suspended_at_suspend_check = true;
1209  // Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
1210  ScopedThreadSuspension(this, kSuspended);
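  // Note: the temporary above transitions to kSuspended in its constructor and back to runnable
  // in its destructor at the end of this statement, which is enough to act on any pending
  // suspend request.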
1211  tls32_.suspended_at_suspend_check = false;
1212  VLOG(threads) << this << " self-reviving";
1213}
1214
1215void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
1216  std::string group_name;
1217  int priority;
1218  bool is_daemon = false;
1219  Thread* self = Thread::Current();
1220
1221  // If flip_function is not null, it means we have run a checkpoint
1222  // before the thread wakes up to execute the flip function and the
1223  // thread roots haven't been forwarded.  So the following access to
1224  // the roots (opeer or methods in the frames) would be bad. Run it
1225  // here. TODO: clean up.
1226  if (thread != nullptr) {
1227    ScopedObjectAccessUnchecked soa(self);
1228    Thread* this_thread = const_cast<Thread*>(thread);
1229    Closure* flip_func = this_thread->GetFlipFunction();
1230    if (flip_func != nullptr) {
1231      flip_func->Run(this_thread);
1232    }
1233  }
1234
1235  // Don't do this if we are aborting since the GC may have all the threads suspended. This will
1236  // cause ScopedObjectAccessUnchecked to deadlock.
1237  if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
1238    ScopedObjectAccessUnchecked soa(self);
1239    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
1240        ->GetInt(thread->tlsPtr_.opeer);
1241    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
1242        ->GetBoolean(thread->tlsPtr_.opeer);
1243
1244    mirror::Object* thread_group =
1245        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
1246
1247    if (thread_group != nullptr) {
1248      ArtField* group_name_field =
1249          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
1250      mirror::String* group_name_string =
1251          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
1252      group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
1253    }
1254  } else {
1255    priority = GetNativePriority();
1256  }
1257
1258  std::string scheduler_group_name(GetSchedulerGroupName(tid));
1259  if (scheduler_group_name.empty()) {
1260    scheduler_group_name = "default";
1261  }
1262
1263  if (thread != nullptr) {
1264    os << '"' << *thread->tlsPtr_.name << '"';
1265    if (is_daemon) {
1266      os << " daemon";
1267    }
1268    os << " prio=" << priority
1269       << " tid=" << thread->GetThreadId()
1270       << " " << thread->GetState();
1271    if (thread->IsStillStarting()) {
1272      os << " (still starting up)";
1273    }
1274    os << "\n";
1275  } else {
1276    os << '"' << ::art::GetThreadName(tid) << '"'
1277       << " prio=" << priority
1278       << " (not attached)\n";
1279  }
1280
1281  if (thread != nullptr) {
1282    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1283    os << "  | group=\"" << group_name << "\""
1284       << " sCount=" << thread->tls32_.suspend_count
1285       << " dsCount=" << thread->tls32_.debug_suspend_count
1286       << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
1287       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
1288  }
1289
1290  os << "  | sysTid=" << tid
1291     << " nice=" << getpriority(PRIO_PROCESS, tid)
1292     << " cgrp=" << scheduler_group_name;
1293  if (thread != nullptr) {
1294    int policy;
1295    sched_param sp;
1296    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
1297                       __FUNCTION__);
1298    os << " sched=" << policy << "/" << sp.sched_priority
1299       << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
1300  }
1301  os << "\n";
1302
1303  // Grab the scheduler stats for this thread.
1304  std::string scheduler_stats;
1305  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
1306    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
1307  } else {
1308    scheduler_stats = "0 0 0";
1309  }
1310
1311  char native_thread_state = '?';
1312  int utime = 0;
1313  int stime = 0;
1314  int task_cpu = 0;
1315  GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
1316
1317  os << "  | state=" << native_thread_state
1318     << " schedstat=( " << scheduler_stats << " )"
1319     << " utm=" << utime
1320     << " stm=" << stime
1321     << " core=" << task_cpu
1322     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
1323  if (thread != nullptr) {
1324    os << "  | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
1325        << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
1326        << PrettySize(thread->tlsPtr_.stack_size) << "\n";
1327    // Dump the held mutexes.
1328    os << "  | held mutexes=";
1329    for (size_t i = 0; i < kLockLevelCount; ++i) {
1330      if (i != kMonitorLock) {
1331        BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
1332        if (mutex != nullptr) {
1333          os << " \"" << mutex->GetName() << "\"";
1334          if (mutex->IsReaderWriterMutex()) {
1335            ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
1336            if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
1337              os << "(exclusive held)";
1338            } else {
1339              os << "(shared held)";
1340            }
1341          }
1342        }
1343      }
1344    }
1345    os << "\n";
1346  }
1347}
1348
1349void Thread::DumpState(std::ostream& os) const {
1350  Thread::DumpState(os, this, GetTid());
1351}
1352
1353struct StackDumpVisitor : public StackVisitor {
1354  StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
1355      SHARED_REQUIRES(Locks::mutator_lock_)
1356      : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1357        os(os_in),
1358        can_allocate(can_allocate_in),
1359        last_method(nullptr),
1360        last_line_number(0),
1361        repetition_count(0),
1362        frame_count(0) {}
1363
1364  virtual ~StackDumpVisitor() {
1365    if (frame_count == 0) {
1366      os << "  (no managed stack frames)\n";
1367    }
1368  }
1369
1370  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
1371    ArtMethod* m = GetMethod();
1372    if (m->IsRuntimeMethod()) {
1373      return true;
1374    }
1375    m = m->GetInterfaceMethodIfProxy(sizeof(void*));
1376    const int kMaxRepetition = 3;
1377    mirror::Class* c = m->GetDeclaringClass();
1378    mirror::DexCache* dex_cache = c->GetDexCache();
1379    int line_number = -1;
1380    if (dex_cache != nullptr) {  // be tolerant of bad input
1381      const DexFile& dex_file = *dex_cache->GetDexFile();
1382      line_number = dex_file.GetLineNumFromPC(m, GetDexPc(false));
1383    }
1384    if (line_number == last_line_number && last_method == m) {
1385      ++repetition_count;
1386    } else {
1387      if (repetition_count >= kMaxRepetition) {
1388        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
1389      }
1390      repetition_count = 0;
1391      last_line_number = line_number;
1392      last_method = m;
1393    }
1394    if (repetition_count < kMaxRepetition) {
1395      os << "  at " << PrettyMethod(m, false);
1396      if (m->IsNative()) {
1397        os << "(Native method)";
1398      } else {
1399        const char* source_file(m->GetDeclaringClassSourceFile());
1400        os << "(" << (source_file != nullptr ? source_file : "unavailable")
1401           << ":" << line_number << ")";
1402      }
1403      os << "\n";
1404      if (frame_count == 0) {
1405        Monitor::DescribeWait(os, GetThread());
1406      }
1407      if (can_allocate) {
1408        // Visit locks, but do not abort on errors. This would trigger a nested abort.
1409        Monitor::VisitLocks(this, DumpLockedObject, &os, false);
1410      }
1411    }
1412
1413    ++frame_count;
1414    return true;
1415  }
1416
1417  static void DumpLockedObject(mirror::Object* o, void* context)
1418      SHARED_REQUIRES(Locks::mutator_lock_) {
1419    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
1420    os << "  - locked ";
1421    if (o == nullptr) {
1422      os << "an unknown object";
1423    } else {
1424      if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
1425          Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
1426        // Getting the identity hashcode here would result in lock inflation and suspension of the
1427        // current thread, which isn't safe if this is the only runnable thread.
1428        os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
1429                           PrettyTypeOf(o).c_str());
1430      } else {
1431        // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
        // we get the pretty type before we call IdentityHashCode.
1433        const std::string pretty_type(PrettyTypeOf(o));
1434        os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
1435      }
1436    }
1437    os << "\n";
1438  }
1439
1440  std::ostream& os;
1441  const bool can_allocate;
1442  ArtMethod* last_method;
1443  int last_line_number;
1444  int repetition_count;
1445  int frame_count;
1446};
1447
1448static bool ShouldShowNativeStack(const Thread* thread)
1449    SHARED_REQUIRES(Locks::mutator_lock_) {
1450  ThreadState state = thread->GetState();
1451
1452  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
1453  if (state > kWaiting && state < kStarting) {
1454    return true;
1455  }
1456
1457  // In an Object.wait variant or Thread.sleep? That's not interesting.
1458  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
1459    return false;
1460  }
1461
1462  // Threads with no managed stack frames should be shown.
1463  const ManagedStack* managed_stack = thread->GetManagedStack();
1464  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
1465      managed_stack->GetTopShadowFrame() == nullptr)) {
1466    return true;
1467  }
1468
1469  // In some other native method? That's interesting.
1470  // We don't just check kNative because native methods will be in state kSuspended if they're
1471  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
1472  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
1473  ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
1474  return current_method != nullptr && current_method->IsNative();
1475}
1476
1477void Thread::DumpJavaStack(std::ostream& os) const {
  // If flip_function is not null, a checkpoint has run but this thread has not yet woken up to
  // execute the flip function, so its roots haven't been forwarded. The accesses to those roots
  // below (locks or methods in the frames) would therefore be unsafe, so run the flip function
  // here first. TODO: clean up.
1483  {
1484    Thread* this_thread = const_cast<Thread*>(this);
1485    Closure* flip_func = this_thread->GetFlipFunction();
1486    if (flip_func != nullptr) {
1487      flip_func->Run(this_thread);
1488    }
1489  }
1490
  // Dumping the Java stack involves the verifier for locks. The verifier operates under the
  // assumption that there is no exception pending on entry, so stash any pending exception.
  // Use Thread::Current() rather than this, in case one thread is dumping the stack of another
  // (suspended) thread.
1495  StackHandleScope<1> scope(Thread::Current());
1496  Handle<mirror::Throwable> exc;
1497  bool have_exception = false;
1498  if (IsExceptionPending()) {
1499    exc = scope.NewHandle(GetException());
1500    const_cast<Thread*>(this)->ClearException();
1501    have_exception = true;
1502  }
1503
1504  std::unique_ptr<Context> context(Context::Create());
1505  StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
1506                          !tls32_.throwing_OutOfMemoryError);
1507  dumper.WalkStack();
1508
1509  if (have_exception) {
1510    const_cast<Thread*>(this)->SetException(exc.Get());
1511  }
1512}
1513
1514void Thread::DumpStack(std::ostream& os,
1515                       bool dump_native_stack,
1516                       BacktraceMap* backtrace_map) const {
  // TODO: we call this code when dying but may not have suspended the thread ourselves. The
  //       IsSuspended check is therefore racy with its use for dumping (normally we inhibit
  //       that race with the thread_suspend_count_lock_).
1520  bool dump_for_abort = (gAborting > 0);
1521  bool safe_to_dump = (this == Thread::Current() || IsSuspended());
1522  if (!kIsDebugBuild) {
    // We always want to dump the stack for an abort; however, there is no point dumping another
    // thread's stack in a debug build, since we would hit the not-suspended check in the stack
    // walk.
1525    safe_to_dump = (safe_to_dump || dump_for_abort);
1526  }
1527  if (safe_to_dump) {
1528    // If we're currently in native code, dump that stack before dumping the managed stack.
1529    if (dump_native_stack && (dump_for_abort || ShouldShowNativeStack(this))) {
1530      DumpKernelStack(os, GetTid(), "  kernel: ", false);
1531      ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
1532      DumpNativeStack(os, GetTid(), backtrace_map, "  native: ", method);
1533    }
1534    DumpJavaStack(os);
1535  } else {
1536    os << "Not able to dump stack of thread that isn't suspended";
1537  }
1538}
1539
1540void Thread::ThreadExitCallback(void* arg) {
1541  Thread* self = reinterpret_cast<Thread*>(arg);
1542  if (self->tls32_.thread_exit_check_count == 0) {
1543    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
1544        "going to use a pthread_key_create destructor?): " << *self;
1545    CHECK(is_started_);
1546#ifdef __ANDROID__
1547    __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;
1548#else
1549    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
1550#endif
1551    self->tls32_.thread_exit_check_count = 1;
1552  } else {
1553    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
1554  }
1555}
1556
1557void Thread::Startup() {
1558  CHECK(!is_started_);
1559  is_started_ = true;
1560  {
1561    // MutexLock to keep annotalysis happy.
1562    //
    // Note that we pass null for the thread because Thread::Current() can return garbage at this
    // point: is_started_ is already true, but Thread::pthread_key_self_ has not been initialized
    // yet. This was seen on glibc.
1567    MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
1568    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
1569                                         *Locks::thread_suspend_count_lock_);
1570  }
1571
1572  // Allocate a TLS slot.
1573  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
1574                     "self key");
1575
1576  // Double-check the TLS slot allocation.
1577  if (pthread_getspecific(pthread_key_self_) != nullptr) {
1578    LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
1579  }
1580}
1581
1582void Thread::FinishStartup() {
1583  Runtime* runtime = Runtime::Current();
1584  CHECK(runtime->IsStarted());
1585
1586  // Finish attaching the main thread.
1587  ScopedObjectAccess soa(Thread::Current());
1588  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
1589  Thread::Current()->AssertNoPendingException();
1590
1591  Runtime::Current()->GetClassLinker()->RunRootClinits();
1592}
1593
1594void Thread::Shutdown() {
1595  CHECK(is_started_);
1596  is_started_ = false;
1597  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
1598  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
1599  if (resume_cond_ != nullptr) {
1600    delete resume_cond_;
1601    resume_cond_ = nullptr;
1602  }
1603}
1604
1605Thread::Thread(bool daemon)
1606    : tls32_(daemon),
1607      wait_monitor_(nullptr),
1608      interrupted_(false),
1609      can_call_into_java_(true) {
1610  wait_mutex_ = new Mutex("a thread wait mutex");
1611  wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
1612  tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
1613  tlsPtr_.name = new std::string(kThreadNameDuringStartup);
1614  tlsPtr_.nested_signal_state = static_cast<jmp_buf*>(malloc(sizeof(jmp_buf)));
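  // The nested signal state is a jmp_buf used by the fault handler to recover (via longjmp) from
  // a signal raised while it is already handling one; it is freed in ~Thread.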
1615
1616  static_assert((sizeof(Thread) % 4) == 0U,
1617                "art::Thread has a size which is not a multiple of 4.");
1618  tls32_.state_and_flags.as_struct.flags = 0;
1619  tls32_.state_and_flags.as_struct.state = kNative;
1620  memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
1621  std::fill(tlsPtr_.rosalloc_runs,
1622            tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
1623            gc::allocator::RosAlloc::GetDedicatedFullRun());
1624  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
1625    tlsPtr_.checkpoint_functions[i] = nullptr;
1626  }
1627  for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
1628    tlsPtr_.active_suspend_barriers[i] = nullptr;
1629  }
1630  tlsPtr_.flip_function = nullptr;
1631  tlsPtr_.thread_local_mark_stack = nullptr;
1632  tls32_.suspended_at_suspend_check = false;
1633}
1634
1635bool Thread::IsStillStarting() const {
1636  // You might think you can check whether the state is kStarting, but for much of thread startup,
1637  // the thread is in kNative; it might also be in kVmWait.
1638  // You might think you can check whether the peer is null, but the peer is actually created and
1639  // assigned fairly early on, and needs to be.
1640  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
1641  // this thread _ever_ entered kRunnable".
1642  return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
1643      (*tlsPtr_.name == kThreadNameDuringStartup);
1644}
1645
1646void Thread::AssertPendingException() const {
1647  CHECK(IsExceptionPending()) << "Pending exception expected.";
1648}
1649
1650void Thread::AssertPendingOOMException() const {
1651  AssertPendingException();
1652  auto* e = GetException();
1653  CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass())
1654      << e->Dump();
1655}
1656
1657void Thread::AssertNoPendingException() const {
1658  if (UNLIKELY(IsExceptionPending())) {
1659    ScopedObjectAccess soa(Thread::Current());
1660    mirror::Throwable* exception = GetException();
1661    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
1662  }
1663}
1664
1665void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
1666  if (UNLIKELY(IsExceptionPending())) {
1667    ScopedObjectAccess soa(Thread::Current());
1668    mirror::Throwable* exception = GetException();
1669    LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
1670        << exception->Dump();
1671  }
1672}
1673
1674class MonitorExitVisitor : public SingleRootVisitor {
1675 public:
1676  explicit MonitorExitVisitor(Thread* self) : self_(self) { }
1677
1678  // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
1679  void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
1680      OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1681    if (self_->HoldsLock(entered_monitor)) {
1682      LOG(WARNING) << "Calling MonitorExit on object "
1683                   << entered_monitor << " (" << PrettyTypeOf(entered_monitor) << ")"
1684                   << " left locked by native thread "
1685                   << *Thread::Current() << " which is detaching";
1686      entered_monitor->MonitorExit(self_);
1687    }
1688  }
1689
1690 private:
1691  Thread* const self_;
1692};
1693
1694void Thread::Destroy() {
1695  Thread* self = this;
1696  DCHECK_EQ(self, Thread::Current());
1697
1698  if (tlsPtr_.jni_env != nullptr) {
1699    {
1700      ScopedObjectAccess soa(self);
1701      MonitorExitVisitor visitor(self);
1702      // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1703      tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal));
1704    }
    // Release the global references held by this thread; deleting them may require the mutator
    // lock.
1706    if (tlsPtr_.jpeer != nullptr) {
1707      // If pthread_create fails we don't have a jni env here.
1708      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
1709      tlsPtr_.jpeer = nullptr;
1710    }
1711    if (tlsPtr_.class_loader_override != nullptr) {
1712      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
1713      tlsPtr_.class_loader_override = nullptr;
1714    }
1715  }
1716
1717  if (tlsPtr_.opeer != nullptr) {
1718    ScopedObjectAccess soa(self);
    // We may need to call user-supplied managed code; do this before final clean-up.
1720    HandleUncaughtExceptions(soa);
1721    RemoveFromThreadGroup(soa);
1722
1723    // this.nativePeer = 0;
1724    if (Runtime::Current()->IsActiveTransaction()) {
1725      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1726          ->SetLong<true>(tlsPtr_.opeer, 0);
1727    } else {
1728      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1729          ->SetLong<false>(tlsPtr_.opeer, 0);
1730    }
1731    Dbg::PostThreadDeath(self);
1732
1733    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
1734    // who is waiting.
1735    mirror::Object* lock =
1736        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
1737    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
1738    if (lock != nullptr) {
1739      StackHandleScope<1> hs(self);
1740      Handle<mirror::Object> h_obj(hs.NewHandle(lock));
1741      ObjectLock<mirror::Object> locker(self, h_obj);
1742      locker.NotifyAll();
1743    }
1744    tlsPtr_.opeer = nullptr;
1745  }
1746
1747  {
1748    ScopedObjectAccess soa(self);
1749    Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
1750    if (kUseReadBarrier) {
1751      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
1752    }
1753  }
1754}
1755
1756Thread::~Thread() {
1757  CHECK(tlsPtr_.class_loader_override == nullptr);
1758  CHECK(tlsPtr_.jpeer == nullptr);
1759  CHECK(tlsPtr_.opeer == nullptr);
1760  bool initialized = (tlsPtr_.jni_env != nullptr);  // Did Thread::Init run?
1761  if (initialized) {
1762    delete tlsPtr_.jni_env;
1763    tlsPtr_.jni_env = nullptr;
1764  }
1765  CHECK_NE(GetState(), kRunnable);
1766  CHECK_NE(ReadFlag(kCheckpointRequest), true);
1767  CHECK(tlsPtr_.checkpoint_functions[0] == nullptr);
1768  CHECK(tlsPtr_.checkpoint_functions[1] == nullptr);
1769  CHECK(tlsPtr_.checkpoint_functions[2] == nullptr);
1770  CHECK(tlsPtr_.flip_function == nullptr);
1771  CHECK_EQ(tls32_.suspended_at_suspend_check, false);
1772
1773  // Make sure we processed all deoptimization requests.
1774  CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
1775  CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) <<
1776      "Not all deoptimized frames have been consumed by the debugger.";
1777
  // We may be deleting a stillborn thread.
1779  SetStateUnsafe(kTerminated);
1780
1781  delete wait_cond_;
1782  delete wait_mutex_;
1783
1784  if (tlsPtr_.long_jump_context != nullptr) {
1785    delete tlsPtr_.long_jump_context;
1786  }
1787
1788  if (initialized) {
1789    CleanupCpu();
1790  }
1791
1792  if (tlsPtr_.single_step_control != nullptr) {
1793    delete tlsPtr_.single_step_control;
1794  }
1795  delete tlsPtr_.instrumentation_stack;
1796  delete tlsPtr_.name;
1797  delete tlsPtr_.stack_trace_sample;
1798  free(tlsPtr_.nested_signal_state);
1799
1800  Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
1801
1802  TearDownAlternateSignalStack();
1803}
1804
1805void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
1806  if (!IsExceptionPending()) {
1807    return;
1808  }
1809  ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1810  ScopedThreadStateChange tsc(this, kNative);
1811
1812  // Get and clear the exception.
1813  ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
1814  tlsPtr_.jni_env->ExceptionClear();
1815
1816  // If the thread has its own handler, use that.
1817  ScopedLocalRef<jobject> handler(tlsPtr_.jni_env,
1818                                  tlsPtr_.jni_env->GetObjectField(peer.get(),
1819                                      WellKnownClasses::java_lang_Thread_uncaughtHandler));
1820  if (handler.get() == nullptr) {
    // Otherwise use the thread group's default handler (ThreadGroup itself implements
    // Thread.UncaughtExceptionHandler).
1822    handler.reset(tlsPtr_.jni_env->GetObjectField(peer.get(),
1823                                                  WellKnownClasses::java_lang_Thread_group));
1824  }
1825
1826  // Call the handler.
1827  tlsPtr_.jni_env->CallVoidMethod(handler.get(),
1828      WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException,
1829      peer.get(), exception.get());
1830
1831  // If the handler threw, clear that exception too.
1832  tlsPtr_.jni_env->ExceptionClear();
1833}
1834
1835void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
1836  // this.group.removeThread(this);
1837  // group can be null if we're in the compiler or a test.
1838  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
1839      ->GetObject(tlsPtr_.opeer);
1840  if (ogroup != nullptr) {
1841    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
1842    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1843    ScopedThreadStateChange tsc(soa.Self(), kNative);
1844    tlsPtr_.jni_env->CallVoidMethod(group.get(),
1845                                    WellKnownClasses::java_lang_ThreadGroup_removeThread,
1846                                    peer.get());
1847  }
1848}
1849
1850size_t Thread::NumHandleReferences() {
1851  size_t count = 0;
1852  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1853    count += cur->NumberOfReferences();
1854  }
1855  return count;
1856}
1857
1858bool Thread::HandleScopeContains(jobject obj) const {
1859  StackReference<mirror::Object>* hs_entry =
1860      reinterpret_cast<StackReference<mirror::Object>*>(obj);
  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1862    if (cur->Contains(hs_entry)) {
1863      return true;
1864    }
1865  }
1866  // JNI code invoked from portable code uses shadow frames rather than the handle scope.
1867  return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
1868}
1869
1870void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
1871  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
1872      visitor, RootInfo(kRootNativeStack, thread_id));
1873  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
1874    for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
1875      // GetReference returns a pointer to the stack reference within the handle scope. If this
1876      // needs to be updated, it will be done by the root visitor.
1877      buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
1878    }
1879  }
1880}
1881
1882mirror::Object* Thread::DecodeJObject(jobject obj) const {
1883  if (obj == nullptr) {
1884    return nullptr;
1885  }
1886  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1887  IndirectRefKind kind = GetIndirectRefKind(ref);
1888  mirror::Object* result;
1889  bool expect_null = false;
1890  // The "kinds" below are sorted by the frequency we expect to encounter them.
1891  if (kind == kLocal) {
1892    IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
1893    // Local references do not need a read barrier.
1894    result = locals.Get<kWithoutReadBarrier>(ref);
1895  } else if (kind == kHandleScopeOrInvalid) {
1896    // TODO: make stack indirect reference table lookup more efficient.
1897    // Check if this is a local reference in the handle scope.
1898    if (LIKELY(HandleScopeContains(obj))) {
1899      // Read from handle scope.
1900      result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
1901      VerifyObject(result);
1902    } else {
1903      tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj);
1904      expect_null = true;
1905      result = nullptr;
1906    }
1907  } else if (kind == kGlobal) {
1908    result = tlsPtr_.jni_env->vm->DecodeGlobal(ref);
1909  } else {
1910    DCHECK_EQ(kind, kWeakGlobal);
1911    result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
1912    if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
1913      // This is a special case where it's okay to return null.
1914      expect_null = true;
1915      result = nullptr;
1916    }
1917  }
1918
1919  if (UNLIKELY(!expect_null && result == nullptr)) {
1920    tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p",
1921                                   ToStr<IndirectRefKind>(kind).c_str(), obj);
1922  }
1923  return result;
1924}
1925
1926bool Thread::IsJWeakCleared(jweak obj) const {
1927  CHECK(obj != nullptr);
1928  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1929  IndirectRefKind kind = GetIndirectRefKind(ref);
1930  CHECK_EQ(kind, kWeakGlobal);
1931  return tlsPtr_.jni_env->vm->IsWeakGlobalCleared(const_cast<Thread*>(this), ref);
1932}
1933
1934// Implements java.lang.Thread.interrupted.
1935bool Thread::Interrupted() {
1936  MutexLock mu(Thread::Current(), *wait_mutex_);
1937  bool interrupted = IsInterruptedLocked();
1938  SetInterruptedLocked(false);
1939  return interrupted;
1940}
1941
1942// Implements java.lang.Thread.isInterrupted.
1943bool Thread::IsInterrupted() {
1944  MutexLock mu(Thread::Current(), *wait_mutex_);
1945  return IsInterruptedLocked();
1946}
1947
1948void Thread::Interrupt(Thread* self) {
1949  MutexLock mu(self, *wait_mutex_);
1950  if (interrupted_) {
1951    return;
1952  }
1953  interrupted_ = true;
1954  NotifyLocked(self);
1955}
1956
1957void Thread::Notify() {
1958  Thread* self = Thread::Current();
1959  MutexLock mu(self, *wait_mutex_);
1960  NotifyLocked(self);
1961}
1962
1963void Thread::NotifyLocked(Thread* self) {
1964  if (wait_monitor_ != nullptr) {
1965    wait_cond_->Signal(self);
1966  }
1967}
1968
1969void Thread::SetClassLoaderOverride(jobject class_loader_override) {
1970  if (tlsPtr_.class_loader_override != nullptr) {
1971    GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
1972  }
1973  tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
1974}
1975
1976class CountStackDepthVisitor : public StackVisitor {
1977 public:
1978  explicit CountStackDepthVisitor(Thread* thread)
1979      SHARED_REQUIRES(Locks::mutator_lock_)
1980      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1981        depth_(0), skip_depth_(0), skipping_(true) {}
1982
1983  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
1984    // We want to skip frames up to and including the exception's constructor.
1985    // Note we also skip the frame if it doesn't have a method (namely the callee
1986    // save frame)
1987    ArtMethod* m = GetMethod();
1988    if (skipping_ && !m->IsRuntimeMethod() &&
1989        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
1990      skipping_ = false;
1991    }
1992    if (!skipping_) {
1993      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
1994        ++depth_;
1995      }
1996    } else {
1997      ++skip_depth_;
1998    }
1999    return true;
2000  }
2001
2002  int GetDepth() const {
2003    return depth_;
2004  }
2005
2006  int GetSkipDepth() const {
2007    return skip_depth_;
2008  }
2009
2010 private:
2011  uint32_t depth_;
2012  uint32_t skip_depth_;
2013  bool skipping_;
2014
2015  DISALLOW_COPY_AND_ASSIGN(CountStackDepthVisitor);
2016};
2017
2018template<bool kTransactionActive>
2019class BuildInternalStackTraceVisitor : public StackVisitor {
2020 public:
2021  BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
2022      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2023        self_(self),
2024        skip_depth_(skip_depth),
2025        count_(0),
2026        trace_(nullptr),
2027        pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
2028
2029  bool Init(int depth) SHARED_REQUIRES(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
    // Allocate the method trace as an object array where the first element is a pointer array
    // holding the ArtMethod pointers and dex PCs. The remaining elements are the declaring
    // classes of those ArtMethods.
2033    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
2034    StackHandleScope<1> hs(self_);
2035    mirror::Class* array_class = class_linker->GetClassRoot(ClassLinker::kObjectArrayClass);
2036    // The first element is the methods and dex pc array, the other elements are declaring classes
2037    // for the methods to ensure classes in the stack trace don't get unloaded.
2038    Handle<mirror::ObjectArray<mirror::Object>> trace(
2039        hs.NewHandle(
2040            mirror::ObjectArray<mirror::Object>::Alloc(hs.Self(), array_class, depth + 1)));
2041    if (trace.Get() == nullptr) {
2042      // Acquire uninterruptible_ in all paths.
2043      self_->StartAssertNoThreadSuspension("Building internal stack trace");
2044      self_->AssertPendingOOMException();
2045      return false;
2046    }
2047    mirror::PointerArray* methods_and_pcs = class_linker->AllocPointerArray(self_, depth * 2);
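    // methods_and_pcs has 2 * depth slots: ArtMethod* pointers in the first half, dex PCs in the
    // second half.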
2048    const char* last_no_suspend_cause =
2049        self_->StartAssertNoThreadSuspension("Building internal stack trace");
2050    if (methods_and_pcs == nullptr) {
2051      self_->AssertPendingOOMException();
2052      return false;
2053    }
2054    trace->Set(0, methods_and_pcs);
2055    trace_ = trace.Get();
    // If we are called from native, use non-transactional mode.
2057    CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
2058    return true;
2059  }
2060
2061  virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) {
2062    self_->EndAssertNoThreadSuspension(nullptr);
2063  }
2064
2065  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
2066    if (trace_ == nullptr) {
2067      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
2068    }
2069    if (skip_depth_ > 0) {
2070      skip_depth_--;
2071      return true;
2072    }
2073    ArtMethod* m = GetMethod();
2074    if (m->IsRuntimeMethod()) {
2075      return true;  // Ignore runtime frames (in particular callee save).
2076    }
2077    mirror::PointerArray* trace_methods_and_pcs = GetTraceMethodsAndPCs();
2078    trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, m, pointer_size_);
2079    trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(
2080        trace_methods_and_pcs->GetLength() / 2 + count_,
2081        m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc(),
2082        pointer_size_);
2083    // Save the declaring class of the method to ensure that the declaring classes of the methods
2084    // do not get unloaded while the stack trace is live.
2085    trace_->Set(count_ + 1, m->GetDeclaringClass());
2086    ++count_;
2087    return true;
2088  }
2089
2090  mirror::PointerArray* GetTraceMethodsAndPCs() const SHARED_REQUIRES(Locks::mutator_lock_) {
2091    return down_cast<mirror::PointerArray*>(trace_->Get(0));
2092  }
2093
2094  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
2095    return trace_;
2096  }
2097
2098 private:
2099  Thread* const self_;
2100  // How many more frames to skip.
2101  int32_t skip_depth_;
2102  // Current position down stack trace.
2103  uint32_t count_;
  // An object array where the first element is a pointer array that contains the ArtMethod
  // pointers on the stack and their dex PCs. The remaining elements are the declaring classes of
  // those ArtMethods: trace_[i+1] contains the declaring class of the ArtMethod of the i'th frame.
2108  mirror::ObjectArray<mirror::Object>* trace_;
2109  // For cross compilation.
2110  const size_t pointer_size_;
2111
2112  DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor);
2113};
2114
2115template<bool kTransactionActive>
2116jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
2117  // Compute depth of stack
2118  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
2119  count_visitor.WalkStack();
2120  int32_t depth = count_visitor.GetDepth();
2121  int32_t skip_depth = count_visitor.GetSkipDepth();
2122
2123  // Build internal stack trace.
2124  BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
2125                                                                         const_cast<Thread*>(this),
2126                                                                         skip_depth);
2127  if (!build_trace_visitor.Init(depth)) {
2128    return nullptr;  // Allocation failed.
2129  }
2130  build_trace_visitor.WalkStack();
2131  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
2132  if (kIsDebugBuild) {
2133    mirror::PointerArray* trace_methods = build_trace_visitor.GetTraceMethodsAndPCs();
2134    // Second half of trace_methods is dex PCs.
2135    for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) {
2136      auto* method = trace_methods->GetElementPtrSize<ArtMethod*>(
2137          i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
2138      CHECK(method != nullptr);
2139    }
2140  }
2141  return soa.AddLocalReference<jobject>(trace);
2142}
2143template jobject Thread::CreateInternalStackTrace<false>(
2144    const ScopedObjectAccessAlreadyRunnable& soa) const;
2145template jobject Thread::CreateInternalStackTrace<true>(
2146    const ScopedObjectAccessAlreadyRunnable& soa) const;
2147
2148bool Thread::IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const {
2149  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
2150  count_visitor.WalkStack();
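  // The exception records the managed stack depth at the point its stack trace was filled in; if
  // that matches the current depth, the method on top of the stack is the one that threw it.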
2151  return count_visitor.GetDepth() == exception->GetStackDepth();
2152}
2153
2154jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
2155    const ScopedObjectAccessAlreadyRunnable& soa,
2156    jobject internal,
2157    jobjectArray output_array,
2158    int* stack_depth) {
2159  // Decode the internal stack trace into the depth, method trace and PC trace.
2160  // Subtract one for the methods and PC trace.
2161  int32_t depth = soa.Decode<mirror::Array*>(internal)->GetLength() - 1;
2162  DCHECK_GE(depth, 0);
2163
2164  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
2165
2166  jobjectArray result;
2167
2168  if (output_array != nullptr) {
2169    // Reuse the array we were given.
2170    result = output_array;
2171    // ...adjusting the number of frames we'll write to not exceed the array length.
2172    const int32_t traces_length =
2173        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->GetLength();
2174    depth = std::min(depth, traces_length);
2175  } else {
2176    // Create java_trace array and place in local reference table
2177    mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
2178        class_linker->AllocStackTraceElementArray(soa.Self(), depth);
2179    if (java_traces == nullptr) {
2180      return nullptr;
2181    }
2182    result = soa.AddLocalReference<jobjectArray>(java_traces);
2183  }
2184
2185  if (stack_depth != nullptr) {
2186    *stack_depth = depth;
2187  }
2188
2189  for (int32_t i = 0; i < depth; ++i) {
2190    mirror::ObjectArray<mirror::Object>* decoded_traces =
2191        soa.Decode<mirror::Object*>(internal)->AsObjectArray<mirror::Object>();
2192    // Methods and dex PC trace is element 0.
2193    DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
2194    mirror::PointerArray* const method_trace =
2195        down_cast<mirror::PointerArray*>(decoded_traces->Get(0));
2196    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
2197    ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, sizeof(void*));
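    // The dex PC sits in the second half of the array, mirroring how BuildInternalStackTraceVisitor
    // stored it.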
2198    uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
2199        i + method_trace->GetLength() / 2, sizeof(void*));
2200    int32_t line_number;
2201    StackHandleScope<3> hs(soa.Self());
2202    auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
2203    auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
2204    if (method->IsProxyMethod()) {
2205      line_number = -1;
2206      class_name_object.Assign(method->GetDeclaringClass()->GetName());
2207      // source_name_object intentionally left null for proxy methods
2208    } else {
2209      line_number = method->GetLineNumFromDexPC(dex_pc);
2210      // Allocate element, potentially triggering GC
2211      // TODO: reuse class_name_object via Class::name_?
2212      const char* descriptor = method->GetDeclaringClassDescriptor();
2213      CHECK(descriptor != nullptr);
2214      std::string class_name(PrettyDescriptor(descriptor));
2215      class_name_object.Assign(
2216          mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
2217      if (class_name_object.Get() == nullptr) {
2218        soa.Self()->AssertPendingOOMException();
2219        return nullptr;
2220      }
2221      const char* source_file = method->GetDeclaringClassSourceFile();
2222      if (source_file != nullptr) {
2223        source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
2224        if (source_name_object.Get() == nullptr) {
2225          soa.Self()->AssertPendingOOMException();
2226          return nullptr;
2227        }
2228      }
2229    }
2230    const char* method_name = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
2231    CHECK(method_name != nullptr);
2232    Handle<mirror::String> method_name_object(
2233        hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
2234    if (method_name_object.Get() == nullptr) {
2235      return nullptr;
2236    }
2237    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
2238        soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
2239    if (obj == nullptr) {
2240      return nullptr;
2241    }
2242    // We are called from native: use non-transactional mode.
2243    soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set<false>(i, obj);
2244  }
2245  return result;
2246}
2247
2248void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
2249  va_list args;
2250  va_start(args, fmt);
2251  ThrowNewExceptionV(exception_class_descriptor, fmt, args);
2252  va_end(args);
2253}
2254
2255void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
2256                                const char* fmt, va_list ap) {
2257  std::string msg;
2258  StringAppendV(&msg, fmt, ap);
2259  ThrowNewException(exception_class_descriptor, msg.c_str());
2260}
2261
2262void Thread::ThrowNewException(const char* exception_class_descriptor,
2263                               const char* msg) {
2264  // Callers should either clear or call ThrowNewWrappedException.
2265  AssertNoPendingExceptionForNewException(msg);
2266  ThrowNewWrappedException(exception_class_descriptor, msg);
2267}
2268
2269static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
2270    SHARED_REQUIRES(Locks::mutator_lock_) {
2271  ArtMethod* method = self->GetCurrentMethod(nullptr);
2272  return method != nullptr
2273      ? method->GetDeclaringClass()->GetClassLoader()
2274      : nullptr;
2275}
2276
2277void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
2278                                      const char* msg) {
2279  DCHECK_EQ(this, Thread::Current());
2280  ScopedObjectAccessUnchecked soa(this);
2281  StackHandleScope<3> hs(soa.Self());
2282  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
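  // Any pending exception becomes the cause of the new exception; stash a reference before
  // clearing it.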
2283  ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
2284  ClearException();
2285  Runtime* runtime = Runtime::Current();
2286  auto* cl = runtime->GetClassLinker();
2287  Handle<mirror::Class> exception_class(
2288      hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader)));
2289  if (UNLIKELY(exception_class.Get() == nullptr)) {
2290    CHECK(IsExceptionPending());
2291    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
2292    return;
2293  }
2294
2295  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
2296                                                             true))) {
2297    DCHECK(IsExceptionPending());
2298    return;
2299  }
2300  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
2301  Handle<mirror::Throwable> exception(
2302      hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
2303
2304  // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
2305  if (exception.Get() == nullptr) {
2306    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
2307    return;
2308  }
2309
2310  // Choose an appropriate constructor and set up the arguments.
2311  const char* signature;
2312  ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
2313  if (msg != nullptr) {
2314    // Ensure we remember this and the method over the String allocation.
2315    msg_string.reset(
2316        soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
2317    if (UNLIKELY(msg_string.get() == nullptr)) {
2318      CHECK(IsExceptionPending());  // OOME.
2319      return;
2320    }
2321    if (cause.get() == nullptr) {
2322      signature = "(Ljava/lang/String;)V";
2323    } else {
2324      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
2325    }
2326  } else {
2327    if (cause.get() == nullptr) {
2328      signature = "()V";
2329    } else {
2330      signature = "(Ljava/lang/Throwable;)V";
2331    }
2332  }
2333  ArtMethod* exception_init_method =
2334      exception_class->FindDeclaredDirectMethod("<init>", signature, cl->GetImagePointerSize());
2335
2336  CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
2337      << PrettyDescriptor(exception_class_descriptor);
2338
2339  if (UNLIKELY(!runtime->IsStarted())) {
2340    // Something is trying to throw an exception without a started runtime, which is the common
2341    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
2342    // the exception fields directly.
2343    if (msg != nullptr) {
2344      exception->SetDetailMessage(down_cast<mirror::String*>(DecodeJObject(msg_string.get())));
2345    }
2346    if (cause.get() != nullptr) {
2347      exception->SetCause(down_cast<mirror::Throwable*>(DecodeJObject(cause.get())));
2348    }
2349    ScopedLocalRef<jobject> trace(GetJniEnv(),
2350                                  Runtime::Current()->IsActiveTransaction()
2351                                      ? CreateInternalStackTrace<true>(soa)
2352                                      : CreateInternalStackTrace<false>(soa));
2353    if (trace.get() != nullptr) {
2354      exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
2355    }
2356    SetException(exception.Get());
2357  } else {
2358    jvalue jv_args[2];
2359    size_t i = 0;
2360
2361    if (msg != nullptr) {
2362      jv_args[i].l = msg_string.get();
2363      ++i;
2364    }
2365    if (cause.get() != nullptr) {
2366      jv_args[i].l = cause.get();
2367      ++i;
2368    }
2369    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
2370    InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(exception_init_method), jv_args);
2371    if (LIKELY(!IsExceptionPending())) {
2372      SetException(exception.Get());
2373    }
2374  }
2375}
2376
2377void Thread::ThrowOutOfMemoryError(const char* msg) {
2378  LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
2379      msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
2380  if (!tls32_.throwing_OutOfMemoryError) {
2381    tls32_.throwing_OutOfMemoryError = true;
2382    ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
2383    tls32_.throwing_OutOfMemoryError = false;
2384  } else {
2385    Dump(LOG(WARNING));  // The pre-allocated OOME has no stack, so help out and log one.
2386    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
2387  }
2388}
2389
2390Thread* Thread::CurrentFromGdb() {
2391  return Thread::Current();
2392}
2393
2394void Thread::DumpFromGdb() const {
2395  std::ostringstream ss;
2396  Dump(ss);
2397  std::string str(ss.str());
2398  // log to stderr for debugging command line processes
2399  std::cerr << str;
2400#ifdef __ANDROID__
2401  // log to logcat for debugging frameworks processes
2402  LOG(INFO) << str;
2403#endif
2404}
2405
// Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
2407template void Thread::DumpThreadOffset<4>(std::ostream& os, uint32_t offset);
2408template void Thread::DumpThreadOffset<8>(std::ostream& os, uint32_t offset);
2409
2410template<size_t ptr_size>
2411void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
2412#define DO_THREAD_OFFSET(x, y) \
2413    if (offset == x.Uint32Value()) { \
2414      os << y; \
2415      return; \
2416    }
2417  DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
2418  DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
2419  DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
  DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer")
2421  DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
2422  DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
2423  DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
2424  DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
2425  DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
2426  DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
2427  DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
2428  DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
2429#undef DO_THREAD_OFFSET
2430
2431#define JNI_ENTRY_POINT_INFO(x) \
2432    if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2433      os << #x; \
2434      return; \
2435    }
2436  JNI_ENTRY_POINT_INFO(pDlsymLookup)
2437#undef JNI_ENTRY_POINT_INFO
2438
2439#define QUICK_ENTRY_POINT_INFO(x) \
2440    if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2441      os << #x; \
2442      return; \
2443    }
2444  QUICK_ENTRY_POINT_INFO(pAllocArray)
2445  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
2446  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck)
2447  QUICK_ENTRY_POINT_INFO(pAllocObject)
2448  QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
2449  QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
2450  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
2451  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
2452  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
2453  QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
2454  QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
2455  QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
2456  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
2457  QUICK_ENTRY_POINT_INFO(pCheckCast)
2458  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
2459  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
2460  QUICK_ENTRY_POINT_INFO(pInitializeType)
2461  QUICK_ENTRY_POINT_INFO(pResolveString)
2462  QUICK_ENTRY_POINT_INFO(pSet8Instance)
2463  QUICK_ENTRY_POINT_INFO(pSet8Static)
2464  QUICK_ENTRY_POINT_INFO(pSet16Instance)
2465  QUICK_ENTRY_POINT_INFO(pSet16Static)
2466  QUICK_ENTRY_POINT_INFO(pSet32Instance)
2467  QUICK_ENTRY_POINT_INFO(pSet32Static)
2468  QUICK_ENTRY_POINT_INFO(pSet64Instance)
2469  QUICK_ENTRY_POINT_INFO(pSet64Static)
2470  QUICK_ENTRY_POINT_INFO(pSetObjInstance)
2471  QUICK_ENTRY_POINT_INFO(pSetObjStatic)
2472  QUICK_ENTRY_POINT_INFO(pGetByteInstance)
2473  QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
2474  QUICK_ENTRY_POINT_INFO(pGetByteStatic)
2475  QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
2476  QUICK_ENTRY_POINT_INFO(pGetShortInstance)
2477  QUICK_ENTRY_POINT_INFO(pGetCharInstance)
2478  QUICK_ENTRY_POINT_INFO(pGetShortStatic)
2479  QUICK_ENTRY_POINT_INFO(pGetCharStatic)
2480  QUICK_ENTRY_POINT_INFO(pGet32Instance)
2481  QUICK_ENTRY_POINT_INFO(pGet32Static)
2482  QUICK_ENTRY_POINT_INFO(pGet64Instance)
2483  QUICK_ENTRY_POINT_INFO(pGet64Static)
2484  QUICK_ENTRY_POINT_INFO(pGetObjInstance)
2485  QUICK_ENTRY_POINT_INFO(pGetObjStatic)
2486  QUICK_ENTRY_POINT_INFO(pAputObjectWithNullAndBoundCheck)
2487  QUICK_ENTRY_POINT_INFO(pAputObjectWithBoundCheck)
2488  QUICK_ENTRY_POINT_INFO(pAputObject)
2489  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData)
2490  QUICK_ENTRY_POINT_INFO(pJniMethodStart)
2491  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized)
2492  QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
2493  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized)
2494  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference)
2495  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized)
2496  QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
2497  QUICK_ENTRY_POINT_INFO(pLockObject)
2498  QUICK_ENTRY_POINT_INFO(pUnlockObject)
2499  QUICK_ENTRY_POINT_INFO(pCmpgDouble)
2500  QUICK_ENTRY_POINT_INFO(pCmpgFloat)
2501  QUICK_ENTRY_POINT_INFO(pCmplDouble)
2502  QUICK_ENTRY_POINT_INFO(pCmplFloat)
2503  QUICK_ENTRY_POINT_INFO(pCos)
2504  QUICK_ENTRY_POINT_INFO(pSin)
2505  QUICK_ENTRY_POINT_INFO(pAcos)
2506  QUICK_ENTRY_POINT_INFO(pAsin)
2507  QUICK_ENTRY_POINT_INFO(pAtan)
2508  QUICK_ENTRY_POINT_INFO(pAtan2)
2509  QUICK_ENTRY_POINT_INFO(pCbrt)
2510  QUICK_ENTRY_POINT_INFO(pCosh)
2511  QUICK_ENTRY_POINT_INFO(pExp)
2512  QUICK_ENTRY_POINT_INFO(pExpm1)
2513  QUICK_ENTRY_POINT_INFO(pHypot)
2514  QUICK_ENTRY_POINT_INFO(pLog)
2515  QUICK_ENTRY_POINT_INFO(pLog10)
2516  QUICK_ENTRY_POINT_INFO(pNextAfter)
2517  QUICK_ENTRY_POINT_INFO(pSinh)
2518  QUICK_ENTRY_POINT_INFO(pTan)
2519  QUICK_ENTRY_POINT_INFO(pTanh)
2520  QUICK_ENTRY_POINT_INFO(pFmod)
2521  QUICK_ENTRY_POINT_INFO(pL2d)
2522  QUICK_ENTRY_POINT_INFO(pFmodf)
2523  QUICK_ENTRY_POINT_INFO(pL2f)
2524  QUICK_ENTRY_POINT_INFO(pD2iz)
2525  QUICK_ENTRY_POINT_INFO(pF2iz)
2526  QUICK_ENTRY_POINT_INFO(pIdivmod)
2527  QUICK_ENTRY_POINT_INFO(pD2l)
2528  QUICK_ENTRY_POINT_INFO(pF2l)
2529  QUICK_ENTRY_POINT_INFO(pLdiv)
2530  QUICK_ENTRY_POINT_INFO(pLmod)
2531  QUICK_ENTRY_POINT_INFO(pLmul)
2532  QUICK_ENTRY_POINT_INFO(pShlLong)
2533  QUICK_ENTRY_POINT_INFO(pShrLong)
2534  QUICK_ENTRY_POINT_INFO(pUshrLong)
2535  QUICK_ENTRY_POINT_INFO(pIndexOf)
2536  QUICK_ENTRY_POINT_INFO(pStringCompareTo)
2537  QUICK_ENTRY_POINT_INFO(pMemcpy)
2538  QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
2539  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
2540  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
2541  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
2542  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
2543  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
2544  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
2545  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
2546  QUICK_ENTRY_POINT_INFO(pTestSuspend)
2547  QUICK_ENTRY_POINT_INFO(pDeliverException)
2548  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
2549  QUICK_ENTRY_POINT_INFO(pThrowDivZero)
2550  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
2551  QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
2552  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
2553  QUICK_ENTRY_POINT_INFO(pDeoptimize)
2554  QUICK_ENTRY_POINT_INFO(pA64Load)
2555  QUICK_ENTRY_POINT_INFO(pA64Store)
2556  QUICK_ENTRY_POINT_INFO(pNewEmptyString)
2557  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B)
2558  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI)
2559  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII)
2560  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII)
2561  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString)
2562  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString)
2563  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset)
2564  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset)
2565  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C)
2566  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII)
2567  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC)
2568  QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints)
2569  QUICK_ENTRY_POINT_INFO(pNewStringFromString)
2570  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
2571  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
2572  QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
2573  QUICK_ENTRY_POINT_INFO(pReadBarrierMark)
2574  QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
2575  QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow)
2576#undef QUICK_ENTRY_POINT_INFO
2577
2578  os << offset;
2579}
2580
2581void Thread::QuickDeliverException() {
2582  // Get exception from thread.
2583  mirror::Throwable* exception = GetException();
2584  CHECK(exception != nullptr);
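  // A sentinel "exception" is used to request deoptimization rather than normal exception
  // delivery.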
2585  bool is_deoptimization = (exception == GetDeoptimizationException());
2586  if (!is_deoptimization) {
2587    // This is a real exception: let the instrumentation know about it.
2588    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
2589    if (instrumentation->HasExceptionCaughtListeners() &&
2590        IsExceptionThrownByCurrentMethod(exception)) {
2591      // Instrumentation may cause GC so keep the exception object safe.
2592      StackHandleScope<1> hs(this);
2593      HandleWrapper<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
2594      instrumentation->ExceptionCaughtEvent(this, exception);
2595    }
2596    // Does instrumentation need to deoptimize the stack?
2597    // Note: we do this *after* reporting the exception to instrumentation in case it
2598    // now requires deoptimization. It may happen if a debugger is attached and requests
2599    // new events (single-step, breakpoint, ...) when the exception is reported.
2600    is_deoptimization = Dbg::IsForcedInterpreterNeededForException(this);
2601    if (is_deoptimization) {
2602      // Save the exception into the deoptimization context so it can be restored
2603      // before entering the interpreter.
2604      PushDeoptimizationContext(
2605          JValue(), /*is_reference */ false, /* from_code */ false, exception);
2606    }
2607  }
2608  // Don't leave exception visible while we try to find the handler, which may cause class
2609  // resolution.
2610  ClearException();
2611  QuickExceptionHandler exception_handler(this, is_deoptimization);
2612  if (is_deoptimization) {
2613    exception_handler.DeoptimizeStack();
2614  } else {
2615    exception_handler.FindCatch(exception);
2616  }
2617  exception_handler.UpdateInstrumentationStack();
2618  exception_handler.DoLongJump();
2619}
2620
2621Context* Thread::GetLongJumpContext() {
2622  Context* result = tlsPtr_.long_jump_context;
2623  if (result == nullptr) {
2624    result = Context::Create();
2625  } else {
2626    tlsPtr_.long_jump_context = nullptr;  // Avoid context being shared.
2627    result->Reset();
2628  }
2629  return result;
2630}
2631
// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
//       so that we don't abort in a special situation (thin-locked monitor) when dumping the Java
//       stack.
2634struct CurrentMethodVisitor FINAL : public StackVisitor {
2635  CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
2636      SHARED_REQUIRES(Locks::mutator_lock_)
2637      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2638        this_object_(nullptr),
2639        method_(nullptr),
2640        dex_pc_(0),
2641        abort_on_error_(abort_on_error) {}
2642  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
2643    ArtMethod* m = GetMethod();
2644    if (m->IsRuntimeMethod()) {
2645      // Continue if this is a runtime method.
2646      return true;
2647    }
2648    if (context_ != nullptr) {
2649      this_object_ = GetThisObject();
2650    }
2651    method_ = m;
2652    dex_pc_ = GetDexPc(abort_on_error_);
2653    return false;
2654  }
2655  mirror::Object* this_object_;
2656  ArtMethod* method_;
2657  uint32_t dex_pc_;
2658  const bool abort_on_error_;
2659};
2660
2661ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
2662  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
2663  visitor.WalkStack(false);
2664  if (dex_pc != nullptr) {
2665    *dex_pc = visitor.dex_pc_;
2666  }
2667  return visitor.method_;
2668}
2669
2670bool Thread::HoldsLock(mirror::Object* object) const {
2671  if (object == nullptr) {
2672    return false;
2673  }
2674  return object->GetLockOwnerThreadId() == GetThreadId();
2675}
2676
// RootVisitor parameters are: (mirror::Object** obj, size_t vreg, const StackVisitor* visitor).
2678template <typename RootVisitor>
2679class ReferenceMapVisitor : public StackVisitor {
2680 public:
2681  ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
2682      SHARED_REQUIRES(Locks::mutator_lock_)
2683        // We are visiting the references in compiled frames, so we do not need
2684        // to know the inlined frames.
2685      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
2686        visitor_(visitor) {}
2687
2688  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
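    // Verbose logging of visited frames is disabled by default; flip the condition below to true
    // to trace each frame while visiting stack roots.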
2689    if (false) {
2690      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
2691                << StringPrintf("@ PC:%04x", GetDexPc());
2692    }
2693    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
2694    if (shadow_frame != nullptr) {
2695      VisitShadowFrame(shadow_frame);
2696    } else {
2697      VisitQuickFrame();
2698    }
2699    return true;
2700  }
2701
2702  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_REQUIRES(Locks::mutator_lock_) {
2703    ArtMethod* m = shadow_frame->GetMethod();
2704    VisitDeclaringClass(m);
2705    DCHECK(m != nullptr);
2706    size_t num_regs = shadow_frame->NumberOfVRegs();
2707    DCHECK(m->IsNative() || shadow_frame->HasReferenceArray());
2708    // handle scope for JNI or References for interpreter.
2709    for (size_t reg = 0; reg < num_regs; ++reg) {
2710      mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2711      if (ref != nullptr) {
2712        mirror::Object* new_ref = ref;
2713        visitor_(&new_ref, reg, this);
2714        if (new_ref != ref) {
2715          shadow_frame->SetVRegReference(reg, new_ref);
2716        }
2717      }
2718    }
2719    // Mark lock count map required for structured locking checks.
2720    shadow_frame->GetLockCountData().VisitMonitors(visitor_, -1, this);
2721  }

 private:
  // Visiting the declaring class is necessary so that we don't unload the class of a method that
  // is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since
  // the threads do not all hold the heap bitmap lock for parallel GC.
  void VisitDeclaringClass(ArtMethod* method)
      SHARED_REQUIRES(Locks::mutator_lock_)
      NO_THREAD_SAFETY_ANALYSIS {
    mirror::Class* klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
    // klass can be null for runtime methods.
    if (klass != nullptr) {
      if (kVerifyImageObjectsMarked) {
        gc::Heap* const heap = Runtime::Current()->GetHeap();
        gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
                                                                                /*fail_ok*/true);
        if (space != nullptr && space->IsImageSpace()) {
          bool failed = false;
          if (!space->GetLiveBitmap()->Test(klass)) {
            failed = true;
            LOG(INTERNAL_FATAL) << "Unmarked object in image " << *space;
          } else if (!heap->GetLiveBitmap()->Test(klass)) {
            failed = true;
            LOG(INTERNAL_FATAL) << "Unmarked object in image through live bitmap " << *space;
          }
          if (failed) {
            GetThread()->Dump(LOG(INTERNAL_FATAL));
            space->AsImageSpace()->DumpSections(LOG(INTERNAL_FATAL));
            LOG(INTERNAL_FATAL) << "Method@" << method->GetDexMethodIndex() << ":" << method
                                << " klass@" << klass;
            // Pretty info last in case it crashes.
            LOG(FATAL) << "Method " << PrettyMethod(method) << " klass " << PrettyClass(klass);
          }
        }
      }
      mirror::Object* new_ref = klass;
      visitor_(&new_ref, -1, this);
      if (new_ref != klass) {
        method->CASDeclaringClass(klass, new_ref->AsClass());
      }
    }
  }

  void VisitQuickFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
    ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
    DCHECK(cur_quick_frame != nullptr);
    ArtMethod* m = *cur_quick_frame;
    VisitDeclaringClass(m);

    // Process the register map (which native and runtime methods don't have).
    if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
      DCHECK(method_header->IsOptimized());
      auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
          reinterpret_cast<uintptr_t>(cur_quick_frame));
      uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
      CodeInfo code_info = method_header->GetOptimizedCodeInfo();
      CodeInfoEncoding encoding = code_info.ExtractEncoding();
      StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
      DCHECK(map.IsValid());
      // Visit stack entries that hold pointers.
      size_t number_of_bits = map.GetNumberOfStackMaskBits(encoding.stack_map_encoding);
      for (size_t i = 0; i < number_of_bits; ++i) {
        if (map.GetStackMaskBit(encoding.stack_map_encoding, i)) {
          auto* ref_addr = vreg_base + i;
          mirror::Object* ref = ref_addr->AsMirrorPtr();
          if (ref != nullptr) {
            mirror::Object* new_ref = ref;
            visitor_(&new_ref, -1, this);
            if (ref != new_ref) {
              ref_addr->Assign(new_ref);
            }
          }
        }
      }
      // Visit callee-save registers that hold pointers.
      uint32_t register_mask = map.GetRegisterMask(encoding.stack_map_encoding);
      for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
        if (register_mask & (1u << i)) {
          mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
          if (*ref_addr != nullptr) {
            visitor_(ref_addr, -1, this);
          }
        }
      }
    }
  }

  // Visitor invoked for each root found while walking the stack.
  RootVisitor& visitor_;
};

class RootCallbackVisitor {
 public:
  RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}

  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
      SHARED_REQUIRES(Locks::mutator_lock_) {
    visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
  }

 private:
  RootVisitor* const visitor_;
  const uint32_t tid_;
};

void Thread::VisitRoots(RootVisitor* visitor) {
  const uint32_t thread_id = GetThreadId();
  visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
                       RootInfo(kRootNativeStack, thread_id));
  }
  visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
  tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
  tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
  HandleScopeVisitRoots(visitor, thread_id);
  if (tlsPtr_.debug_invoke_req != nullptr) {
    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
  }
  // Visit roots for deoptimization.
  if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
    for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
         record != nullptr;
         record = record->GetLink()) {
      for (ShadowFrame* shadow_frame = record->GetShadowFrame();
           shadow_frame != nullptr;
           shadow_frame = shadow_frame->GetLink()) {
        mapper.VisitShadowFrame(shadow_frame);
      }
    }
  }
  for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
       record != nullptr;
       record = record->GetLink()) {
    if (record->IsReference()) {
      visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(),
                                  RootInfo(kRootThreadObject, thread_id));
    }
    visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(),
                                RootInfo(kRootThreadObject, thread_id));
  }
  if (tlsPtr_.frame_id_to_shadow_frame != nullptr) {
    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
    for (FrameIdToShadowFrame* record = tlsPtr_.frame_id_to_shadow_frame;
         record != nullptr;
         record = record->GetNext()) {
      mapper.VisitShadowFrame(record->GetShadowFrame());
    }
  }
  for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) {
    verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
  }
  // Visit roots on this thread's stack.
  Context* context = GetLongJumpContext();
  RootCallbackVisitor visitor_to_callback(visitor, thread_id);
  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitor_to_callback);
  mapper.WalkStack();
  ReleaseLongJumpContext(context);
  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
  }
}

class VerifyRootVisitor : public SingleRootVisitor {
 public:
  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    VerifyObject(root);
  }
};

void Thread::VerifyStackImpl() {
  VerifyRootVisitor visitor;
  std::unique_ptr<Context> context(Context::Create());
  RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
  mapper.WalkStack();
}

// Set the stack end to the limit to be used while handling a stack overflow.
void Thread::SetStackEndForStackOverflow() {
  // During stack overflow we allow use of the full stack.
  if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
    // However, we seem to have already extended to use the full stack.
    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
               << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
    DumpStack(LOG(ERROR));
    LOG(FATAL) << "Recursive stack overflow.";
  }

  tlsPtr_.stack_end = tlsPtr_.stack_begin;

  // Remove the stack overflow protection if it is set up.
  bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
  if (implicit_stack_check) {
    if (!UnprotectStack()) {
      LOG(ERROR) << "Unable to remove stack protection for stack overflow";
    }
  }
}
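
// For context: when implicit checks are disabled, explicit stack overflow checks conceptually
// compare the current stack pointer against tlsPtr_.stack_end before consuming more stack,
// roughly as in the hypothetical sketch below. Lowering stack_end to stack_begin above is what
// lets the overflow-handling code itself run in the otherwise reserved region.
//
//   if (UNLIKELY(reinterpret_cast<uint8_t*>(__builtin_frame_address(0)) <
//                Thread::Current()->GetStackEnd())) {
//     ThrowStackOverflowError(Thread::Current());  // Illustrative only.
//   }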

void Thread::SetTlab(uint8_t* start, uint8_t* end) {
  DCHECK_LE(start, end);
  tlsPtr_.thread_local_start = start;
  tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
  tlsPtr_.thread_local_end = end;
  tlsPtr_.thread_local_objects = 0;
}

bool Thread::HasTlab() const {
  bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
  if (has_tlab) {
    DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
  } else {
    DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
  }
  return has_tlab;
}
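
// A TLAB installed by SetTlab() is consumed by bump-pointer allocation. The real fast path lives
// in the inlined allocation code, but conceptually it amounts to the sketch below, where
// |alloc_size| is a hypothetical, already-aligned request size:
//
//   mirror::Object* obj = nullptr;
//   if (static_cast<size_t>(tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos) >= alloc_size) {
//     obj = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
//     tlsPtr_.thread_local_pos += alloc_size;
//     ++tlsPtr_.thread_local_objects;
//   }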

std::ostream& operator<<(std::ostream& os, const Thread& thread) {
  thread.ShortDump(os);
  return os;
}

bool Thread::ProtectStack(bool fatal_on_error) {
  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
  VLOG(threads) << "Protecting stack at " << pregion;
  if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
    if (fatal_on_error) {
      LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. "
                 << "Reason: " << strerror(errno) << " size: " << kStackOverflowProtectedSize;
    }
    return false;
  }
  return true;
}

bool Thread::UnprotectStack() {
  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
  VLOG(threads) << "Unprotecting stack at " << pregion;
  return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
}
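
// ProtectStack()/UnprotectStack() manage the kStackOverflowProtectedSize bytes immediately below
// tlsPtr_.stack_begin. While that region is PROT_NONE, an implicit stack overflow probe that
// touches it faults, and the fault handler turns the fault into a StackOverflowError rather than
// a crash. Rough layout, lowest address first:
//
//   tlsPtr_.stack_begin - kStackOverflowProtectedSize   <- start of the PROT_NONE guard region
//   tlsPtr_.stack_begin                                  <- lowest usable stack address
//   tlsPtr_.stack_end                                    <- limit honored by explicit checks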

void Thread::ActivateSingleStepControl(SingleStepControl* ssc) {
  CHECK(Dbg::IsDebuggerActive());
  CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this;
  CHECK(ssc != nullptr);
  tlsPtr_.single_step_control = ssc;
}

void Thread::DeactivateSingleStepControl() {
  CHECK(Dbg::IsDebuggerActive());
  CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this;
  SingleStepControl* ssc = GetSingleStepControl();
  tlsPtr_.single_step_control = nullptr;
  delete ssc;
}

void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
  CHECK(Dbg::IsDebuggerActive());
  CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
  CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
  CHECK(req != nullptr);
  tlsPtr_.debug_invoke_req = req;
}

void Thread::ClearDebugInvokeReq() {
  CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
  CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
  DebugInvokeReq* req = tlsPtr_.debug_invoke_req;
  tlsPtr_.debug_invoke_req = nullptr;
  delete req;
}

void Thread::PushVerifier(verifier::MethodVerifier* verifier) {
  verifier->link_ = tlsPtr_.method_verifier;
  tlsPtr_.method_verifier = verifier;
}

void Thread::PopVerifier(verifier::MethodVerifier* verifier) {
  CHECK_EQ(tlsPtr_.method_verifier, verifier);
  tlsPtr_.method_verifier = verifier->link_;
}
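
// PushVerifier()/PopVerifier() maintain a LIFO list of the MethodVerifiers live on this thread,
// which VisitRoots() above walks to keep their roots visible to the GC. A hypothetical caller is
// expected to pair the calls strictly:
//
//   self->PushVerifier(verifier);
//   ...  // Verification work; roots held by |verifier| stay visible to the GC.
//   self->PopVerifier(verifier);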

size_t Thread::NumberOfHeldMutexes() const {
  size_t count = 0;
  for (BaseMutex* mu : tlsPtr_.held_mutexes) {
    count += mu != nullptr ? 1 : 0;
  }
  return count;
}

void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
  DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
  ClearException();
  ShadowFrame* shadow_frame =
      PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
  mirror::Throwable* pending_exception = nullptr;
  bool from_code = false;
  PopDeoptimizationContext(result, &pending_exception, &from_code);
  CHECK(!from_code) << "Deoptimizing from code should be done with single frame deoptimization";
  SetTopOfStack(nullptr);
  SetTopOfShadowStack(shadow_frame);

  // Restore the exception that was pending before deoptimization then interpret the
  // deoptimized frames.
  if (pending_exception != nullptr) {
    SetException(pending_exception);
  }
  interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, from_code, result);
}

void Thread::SetException(mirror::Throwable* new_exception) {
  CHECK(new_exception != nullptr);
  // TODO: DCHECK(!IsExceptionPending());
  tlsPtr_.exception = new_exception;
  // LOG(ERROR) << new_exception->Dump();
}

}  // namespace art