runtime.cc revision e5071cc58a3cdea4a88257be3c8fd4d012a64c74
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "runtime.h"
18
19// sys/mount.h has to come before linux/fs.h due to redefinition of MS_RDONLY, MS_BIND, etc
20#include <sys/mount.h>
21#ifdef __linux__
22#include <linux/fs.h>
23#include <sys/prctl.h>
24#endif
25
26#include <signal.h>
27#include <sys/syscall.h>
28#include "base/memory_tool.h"
29#if defined(__APPLE__)
30#include <crt_externs.h>  // for _NSGetEnviron
31#endif
32
33#include <cstdio>
34#include <cstdlib>
35#include <limits>
36#include <memory_representation.h>
37#include <vector>
38#include <fcntl.h>
39
40#include "android-base/strings.h"
41
42#include "aot_class_linker.h"
43#include "arch/arm/quick_method_frame_info_arm.h"
44#include "arch/arm/registers_arm.h"
45#include "arch/arm64/quick_method_frame_info_arm64.h"
46#include "arch/arm64/registers_arm64.h"
47#include "arch/instruction_set_features.h"
48#include "arch/mips/quick_method_frame_info_mips.h"
49#include "arch/mips/registers_mips.h"
50#include "arch/mips64/quick_method_frame_info_mips64.h"
51#include "arch/mips64/registers_mips64.h"
52#include "arch/x86/quick_method_frame_info_x86.h"
53#include "arch/x86/registers_x86.h"
54#include "arch/x86_64/quick_method_frame_info_x86_64.h"
55#include "arch/x86_64/registers_x86_64.h"
56#include "art_field-inl.h"
57#include "art_method-inl.h"
58#include "asm_support.h"
59#include "asm_support_check.h"
60#include "atomic.h"
61#include "base/arena_allocator.h"
62#include "base/dumpable.h"
63#include "base/enums.h"
64#include "base/stl_util.h"
65#include "base/systrace.h"
66#include "base/unix_file/fd_file.h"
67#include "class_linker-inl.h"
68#include "compiler_callbacks.h"
69#include "debugger.h"
70#include "elf_file.h"
71#include "entrypoints/runtime_asm_entrypoints.h"
72#include "experimental_flags.h"
73#include "fault_handler.h"
74#include "gc/accounting/card_table-inl.h"
75#include "gc/heap.h"
76#include "gc/scoped_gc_critical_section.h"
77#include "gc/space/image_space.h"
78#include "gc/space/space-inl.h"
79#include "gc/system_weak.h"
80#include "handle_scope-inl.h"
81#include "image-inl.h"
82#include "instrumentation.h"
83#include "intern_table.h"
84#include "interpreter/interpreter.h"
85#include "java_vm_ext.h"
86#include "jit/jit.h"
87#include "jit/jit_code_cache.h"
88#include "jit/profile_saver.h"
89#include "jni_internal.h"
90#include "linear_alloc.h"
91#include "mirror/array.h"
92#include "mirror/class-inl.h"
93#include "mirror/class_ext.h"
94#include "mirror/class_loader.h"
95#include "mirror/emulated_stack_frame.h"
96#include "mirror/field.h"
97#include "mirror/method.h"
98#include "mirror/method_handle_impl.h"
99#include "mirror/method_handles_lookup.h"
100#include "mirror/method_type.h"
101#include "mirror/stack_trace_element.h"
102#include "mirror/throwable.h"
103#include "monitor.h"
104#include "native/dalvik_system_DexFile.h"
105#include "native/dalvik_system_VMDebug.h"
106#include "native/dalvik_system_VMRuntime.h"
107#include "native/dalvik_system_VMStack.h"
108#include "native/dalvik_system_ZygoteHooks.h"
109#include "native/java_lang_Class.h"
110#include "native/java_lang_Object.h"
111#include "native/java_lang_String.h"
112#include "native/java_lang_StringFactory.h"
113#include "native/java_lang_System.h"
114#include "native/java_lang_Thread.h"
115#include "native/java_lang_Throwable.h"
116#include "native/java_lang_VMClassLoader.h"
117#include "native/java_lang_Void.h"
118#include "native/java_lang_invoke_MethodHandleImpl.h"
119#include "native/java_lang_ref_FinalizerReference.h"
120#include "native/java_lang_ref_Reference.h"
121#include "native/java_lang_reflect_Array.h"
122#include "native/java_lang_reflect_Constructor.h"
123#include "native/java_lang_reflect_Executable.h"
124#include "native/java_lang_reflect_Field.h"
125#include "native/java_lang_reflect_Method.h"
126#include "native/java_lang_reflect_Parameter.h"
127#include "native/java_lang_reflect_Proxy.h"
128#include "native/java_util_concurrent_atomic_AtomicLong.h"
129#include "native/libcore_util_CharsetUtils.h"
130#include "native/org_apache_harmony_dalvik_ddmc_DdmServer.h"
131#include "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
132#include "native/sun_misc_Unsafe.h"
133#include "native_bridge_art_interface.h"
134#include "native_stack_dump.h"
135#include "nativehelper/JniConstants.h"
136#include "nativehelper/ScopedLocalRef.h"
137#include "oat_file.h"
138#include "oat_file_manager.h"
139#include "object_callbacks.h"
140#include "os.h"
141#include "parsed_options.h"
142#include "quick/quick_method_frame_info.h"
143#include "reflection.h"
144#include "runtime_callbacks.h"
145#include "runtime_options.h"
146#include "scoped_thread_state_change-inl.h"
147#include "sigchain.h"
148#include "signal_catcher.h"
149#include "signal_set.h"
150#include "thread.h"
151#include "thread_list.h"
152#include "ti/agent.h"
153#include "trace.h"
154#include "transaction.h"
155#include "utils.h"
156#include "vdex_file.h"
157#include "verifier/method_verifier.h"
158#include "well_known_classes.h"
159
160#ifdef ART_TARGET_ANDROID
161#include <android/set_abort_message.h>
162#endif
163
164namespace art {
165
166// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
167static constexpr bool kEnableJavaStackTraceHandler = false;
168// Tuned by compiling GmsCore under perf and measuring time spent in DescriptorEquals for class
169// linking.
170static constexpr double kLowMemoryMinLoadFactor = 0.5;
171static constexpr double kLowMemoryMaxLoadFactor = 0.8;
172static constexpr double kNormalMinLoadFactor = 0.4;
173static constexpr double kNormalMaxLoadFactor = 0.7;
174Runtime* Runtime::instance_ = nullptr;
175
// Method-tracing parameters captured from runtime options; consumed in
// Runtime::Start(), which passes them to Trace::Start() once the runtime is up.
struct TraceConfig {
  Trace::TraceMode trace_mode;              // Sampling vs. instrumentation mode.
  Trace::TraceOutputMode trace_output_mode; // Where/how trace data is emitted.
  std::string trace_file;                   // Output file; empty means tracing disabled.
  size_t trace_file_size;                   // Maximum trace buffer/file size in bytes.
};
182
namespace {
// Platform abstraction for retrieving the process environment block.
#ifdef __APPLE__
inline char** GetEnviron() {
  // When Google Test is built as a framework on MacOS X, the environ variable
  // is unavailable. Apple's documentation (man environ) recommends using
  // _NSGetEnviron() instead.
  return *_NSGetEnviron();
}
#else
// Some POSIX platforms expect you to declare environ. extern "C" makes
// it reside in the global namespace.
extern "C" char** environ;
inline char** GetEnviron() { return environ; }
#endif
}  // namespace
198
// Constructs a Runtime with conservative defaults. Most fields are reconfigured
// later from parsed options (see Runtime::Create, which calls Init()); only
// cheap, infallible setup happens here.
Runtime::Runtime()
    : resolution_method_(nullptr),
      imt_conflict_method_(nullptr),
      imt_unimplemented_method_(nullptr),
      instruction_set_(kNone),
      compiler_callbacks_(nullptr),
      is_zygote_(false),
      must_relocate_(false),
      is_concurrent_gc_enabled_(true),
      is_explicit_gc_disabled_(false),
      dex2oat_enabled_(true),
      image_dex2oat_enabled_(true),
      default_stack_size_(0),
      heap_(nullptr),
      max_spins_before_thin_lock_inflation_(Monitor::kDefaultMaxSpinsBeforeThinLockInflation),
      monitor_list_(nullptr),
      monitor_pool_(nullptr),
      thread_list_(nullptr),
      intern_table_(nullptr),
      class_linker_(nullptr),
      signal_catcher_(nullptr),
      use_tombstoned_traces_(false),
      java_vm_(nullptr),
      fault_message_lock_("Fault message lock"),
      fault_message_(""),
      threads_being_born_(0),
      shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
      shutting_down_(false),
      shutting_down_started_(false),
      started_(false),
      finished_starting_(false),
      vfprintf_(nullptr),
      exit_(nullptr),
      abort_(nullptr),
      stats_enabled_(false),
      is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL),
      instrumentation_(),
      main_thread_group_(nullptr),
      system_thread_group_(nullptr),
      system_class_loader_(nullptr),
      dump_gc_performance_on_shutdown_(false),
      preinitialization_transaction_(nullptr),
      verify_(verifier::VerifyMode::kNone),
      allow_dex_file_fallback_(true),
      target_sdk_version_(0),
      implicit_null_checks_(false),
      implicit_so_checks_(false),
      implicit_suspend_checks_(false),
      no_sig_chain_(false),
      force_native_bridge_(false),
      is_native_bridge_loaded_(false),
      is_native_debuggable_(false),
      is_java_debuggable_(false),
      zygote_max_failed_boots_(0),
      experimental_flags_(ExperimentalFlags::kNone),
      oat_file_manager_(nullptr),
      is_low_memory_mode_(false),
      safe_mode_(false),
      dump_native_stack_on_sig_quit_(true),
      pruned_dalvik_cache_(false),
      // Initially assume we perceive jank in case the process state is never updated.
      process_state_(kProcessStateJankPerceptible),
      zygote_no_threads_(false) {
  // Compile-time check that the callee-save method array covers every CalleeSaveType.
  static_assert(Runtime::kCalleeSaveSize ==
                    static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");

  CheckAsmSupportOffsetsAndSizes();
  // Zero the callee-save method slots; they are populated once an image/boot setup runs.
  std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
  interpreter::CheckInterpreterAsmConstants();
  callbacks_.reset(new RuntimeCallbacks());
  // Note: <= because kLast is itself a valid index.
  for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
    deoptimization_counts_[i] = 0u;
  }
}
273
// Tears the runtime down. The ordering below is deliberate: stop producers of
// work (profile saver, daemons, tracing), wait out in-flight thread births and
// GC, shut down internal threads, then delete state, and finally release
// allocators before MemMap::Shutdown().
Runtime::~Runtime() {
  ScopedTrace trace("Runtime shutdown");
  if (is_native_bridge_loaded_) {
    UnloadNativeBridge();
  }

  Thread* self = Thread::Current();
  const bool attach_shutdown_thread = self == nullptr;
  if (attach_shutdown_thread) {
    CHECK(AttachCurrentThread("Shutdown thread", false, nullptr, false));
    self = Thread::Current();
  } else {
    LOG(WARNING) << "Current thread not detached in Runtime shutdown";
  }

  if (dump_gc_performance_on_shutdown_) {
    // This can't be called from the Heap destructor below because it
    // could call RosAlloc::InspectAll() which needs the thread_list
    // to be still alive.
    heap_->DumpGcPerformanceInfo(LOG_STREAM(INFO));
  }

  if (jit_ != nullptr) {
    // Stop the profile saver thread before marking the runtime as shutting down.
    // The saver will try to dump the profiles before being stopped and that
    // requires holding the mutator lock.
    jit_->StopProfileSaver();
  }

  {
    ScopedTrace trace2("Wait for shutdown cond");
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    shutting_down_started_ = true;
    // Wait for threads mid-creation (see EndThreadBirth, which broadcasts).
    while (threads_being_born_ > 0) {
      shutdown_cond_->Wait(self);
    }
    shutting_down_ = true;
  }
  // Shutdown and wait for the daemons.
  CHECK(self != nullptr);
  if (IsFinishedStarting()) {
    ScopedTrace trace2("Waiting for Daemons");
    self->ClearException();
    self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
                                            WellKnownClasses::java_lang_Daemons_stop);
  }

  Trace::Shutdown();

  // Report death. Clients may require a working thread, still, so do it before GC completes and
  // all non-daemon threads are done.
  {
    ScopedObjectAccess soa(self);
    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kDeath);
  }

  if (attach_shutdown_thread) {
    DetachCurrentThread();
    self = nullptr;
  }

  // Make sure to let the GC complete if it is running.
  heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
  heap_->DeleteThreadPool();
  if (jit_ != nullptr) {
    ScopedTrace trace2("Delete jit");
    VLOG(jit) << "Deleting jit thread pool";
    // Delete thread pool before the thread list since we don't want to wait forever on the
    // JIT compiler threads.
    jit_->DeleteThreadPool();
  }

  // Make sure our internal threads are dead before we start tearing down things they're using.
  Dbg::StopJdwp();
  delete signal_catcher_;

  // Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
  {
    ScopedTrace trace2("Delete thread list");
    thread_list_->ShutDown();
  }

  // TODO Maybe do some locking.
  for (auto& agent : agents_) {
    agent.Unload();
  }

  // TODO Maybe do some locking
  for (auto& plugin : plugins_) {
    plugin.Unload();
  }

  // Finally delete the thread list.
  delete thread_list_;

  // Delete the JIT after thread list to ensure that there is no remaining threads which could be
  // accessing the instrumentation when we delete it.
  if (jit_ != nullptr) {
    VLOG(jit) << "Deleting jit";
    jit_.reset(nullptr);
  }

  // Shutdown the fault manager if it was initialized.
  fault_manager.Shutdown();

  ScopedTrace trace2("Delete state");
  delete monitor_list_;
  delete monitor_pool_;
  delete class_linker_;
  delete heap_;
  delete intern_table_;
  delete oat_file_manager_;
  Thread::Shutdown();
  QuasiAtomic::Shutdown();
  verifier::MethodVerifier::Shutdown();

  // Destroy allocators before shutting down the MemMap because they may use it.
  java_vm_.reset();
  linear_alloc_.reset();
  low_4gb_arena_pool_.reset();
  arena_pool_.reset();
  jit_arena_pool_.reset();
  protected_fault_page_.reset();
  MemMap::Shutdown();

  // TODO: acquire a static mutex on Runtime to avoid racing.
  CHECK(instance_ == nullptr || instance_ == this);
  instance_ = nullptr;
}
403
// Helper that dumps as much abort diagnostics as can be safely gathered given
// that the runtime, threads, and locks may be in an arbitrary state.
struct AbortState {
  void Dump(std::ostream& os) const {
    // Recursive abort: keep output minimal to avoid re-triggering the failure.
    if (gAborting > 1) {
      os << "Runtime aborting --- recursively, so no thread-specific detail!\n";
      DumpRecursiveAbort(os);
      return;
    }
    gAborting++;
    os << "Runtime aborting...\n";
    if (Runtime::Current() == nullptr) {
      os << "(Runtime does not yet exist!)\n";
      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
      return;
    }
    Thread* self = Thread::Current();
    if (self == nullptr) {
      os << "(Aborting thread was not attached to runtime!)\n";
      DumpKernelStack(os, GetTid(), "  kernel: ", false);
      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
    } else {
      os << "Aborting thread:\n";
      if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
        DumpThread(os, self);
      } else {
        // Only dump if the mutator lock can be taken without blocking; never
        // wait for it while aborting.
        if (Locks::mutator_lock_->SharedTryLock(self)) {
          DumpThread(os, self);
          Locks::mutator_lock_->SharedUnlock(self);
        }
      }
    }
    DumpAllThreads(os, self);
  }

  // No thread-safety analysis as we do explicitly test for holding the mutator lock.
  void DumpThread(std::ostream& os, Thread* self) const NO_THREAD_SAFETY_ANALYSIS {
    DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self));
    self->Dump(os);
    if (self->IsExceptionPending()) {
      mirror::Throwable* exception = self->GetException();
      os << "Pending exception " << exception->Dump();
    }
  }

  // Dumps every thread; warns (but proceeds) when the expected locks aren't held.
  void DumpAllThreads(std::ostream& os, Thread* self) const {
    Runtime* runtime = Runtime::Current();
    if (runtime != nullptr) {
      ThreadList* thread_list = runtime->GetThreadList();
      if (thread_list != nullptr) {
        bool tll_already_held = Locks::thread_list_lock_->IsExclusiveHeld(self);
        bool ml_already_held = Locks::mutator_lock_->IsSharedHeld(self);
        if (!tll_already_held || !ml_already_held) {
          os << "Dumping all threads without appropriate locks held:"
              << (!tll_already_held ? " thread list lock" : "")
              << (!ml_already_held ? " mutator lock" : "")
              << "\n";
        }
        os << "All threads:\n";
        thread_list->Dump(os);
      }
    }
  }

  // For recursive aborts.
  void DumpRecursiveAbort(std::ostream& os) const NO_THREAD_SAFETY_ANALYSIS {
    // The only thing we'll attempt is dumping the native stack of the current thread. We will only
    // try this if we haven't exceeded an arbitrary amount of recursions, to recover and actually
    // die.
    // Note: as we're using a global counter for the recursive abort detection, there is a potential
    //       race here and it is not OK to just print when the counter is "2" (one from
    //       Runtime::Abort(), one from previous Dump() call). Use a number that seems large enough.
    static constexpr size_t kOnlyPrintWhenRecursionLessThan = 100u;
    if (gAborting < kOnlyPrintWhenRecursionLessThan) {
      gAborting++;
      DumpNativeStack(os, GetTid());
    }
  }
};
481
482void Runtime::Abort(const char* msg) {
483  auto old_value = gAborting.fetch_add(1);  // set before taking any locks
484
485#ifdef ART_TARGET_ANDROID
486  if (old_value == 0) {
487    // Only set the first abort message.
488    android_set_abort_message(msg);
489  }
490#else
491  UNUSED(old_value);
492#endif
493
494#ifdef ART_TARGET_ANDROID
495  android_set_abort_message(msg);
496#endif
497
498  // Ensure that we don't have multiple threads trying to abort at once,
499  // which would result in significantly worse diagnostics.
500  MutexLock mu(Thread::Current(), *Locks::abort_lock_);
501
502  // Get any pending output out of the way.
503  fflush(nullptr);
504
505  // Many people have difficulty distinguish aborts from crashes,
506  // so be explicit.
507  // Note: use cerr on the host to print log lines immediately, so we get at least some output
508  //       in case of recursive aborts. We lose annotation with the source file and line number
509  //       here, which is a minor issue. The same is significantly more complicated on device,
510  //       which is why we ignore the issue there.
511  AbortState state;
512  if (kIsTargetBuild) {
513    LOG(FATAL_WITHOUT_ABORT) << Dumpable<AbortState>(state);
514  } else {
515    std::cerr << Dumpable<AbortState>(state);
516  }
517
518  // Sometimes we dump long messages, and the Android abort message only retains the first line.
519  // In those cases, just log the message again, to avoid logcat limits.
520  if (msg != nullptr && strchr(msg, '\n') != nullptr) {
521    LOG(FATAL_WITHOUT_ABORT) << msg;
522  }
523
524  // Call the abort hook if we have one.
525  if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
526    LOG(FATAL_WITHOUT_ABORT) << "Calling abort hook...";
527    Runtime::Current()->abort_();
528    // notreached
529    LOG(FATAL_WITHOUT_ABORT) << "Unexpectedly returned from abort hook!";
530  }
531
532#if defined(__GLIBC__)
533  // TODO: we ought to be able to use pthread_kill(3) here (or abort(3),
534  // which POSIX defines in terms of raise(3), which POSIX defines in terms
535  // of pthread_kill(3)). On Linux, though, libcorkscrew can't unwind through
536  // libpthread, which means the stacks we dump would be useless. Calling
537  // tgkill(2) directly avoids that.
538  syscall(__NR_tgkill, getpid(), GetTid(), SIGABRT);
539  // TODO: LLVM installs it's own SIGABRT handler so exit to be safe... Can we disable that in LLVM?
540  // If not, we could use sigaction(3) before calling tgkill(2) and lose this call to exit(3).
541  exit(1);
542#else
543  abort();
544#endif
545  // notreached
546}
547
// Called in the zygote before forking; delegates the pre-fork work to the heap.
void Runtime::PreZygoteFork() {
  heap_->PreZygoteFork();
}
551
552void Runtime::CallExitHook(jint status) {
553  if (exit_ != nullptr) {
554    ScopedThreadStateChange tsc(Thread::Current(), kNative);
555    exit_(status);
556    LOG(WARNING) << "Exit hook returned instead of exiting!";
557  }
558}
559
560void Runtime::SweepSystemWeaks(IsMarkedVisitor* visitor) {
561  GetInternTable()->SweepInternTableWeaks(visitor);
562  GetMonitorList()->SweepMonitorList(visitor);
563  GetJavaVM()->SweepJniWeakGlobals(visitor);
564  GetHeap()->SweepAllocationRecords(visitor);
565  if (GetJit() != nullptr) {
566    // Visit JIT literal tables. Objects in these tables are classes and strings
567    // and only classes can be affected by class unloading. The strings always
568    // stay alive as they are strongly interned.
569    // TODO: Move this closer to CleanupClassLoaders, to avoid blocking weak accesses
570    // from mutators. See b/32167580.
571    GetJit()->GetCodeCache()->SweepRootTables(visitor);
572  }
573
574  // All other generic system-weak holders.
575  for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
576    holder->Sweep(visitor);
577  }
578}
579
580bool Runtime::ParseOptions(const RuntimeOptions& raw_options,
581                           bool ignore_unrecognized,
582                           RuntimeArgumentMap* runtime_options) {
583  InitLogging(/* argv */ nullptr, Abort);  // Calls Locks::Init() as a side effect.
584  bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options);
585  if (!parsed) {
586    LOG(ERROR) << "Failed to parse options";
587    return false;
588  }
589  return true;
590}
591
592// Callback to check whether it is safe to call Abort (e.g., to use a call to
593// LOG(FATAL)).  It is only safe to call Abort if the runtime has been created,
594// properly initialized, and has not shut down.
595static bool IsSafeToCallAbort() NO_THREAD_SAFETY_ANALYSIS {
596  Runtime* runtime = Runtime::Current();
597  return runtime != nullptr && runtime->IsStarted() && !runtime->IsShuttingDownLocked();
598}
599
// Creates and initializes the singleton Runtime from already-parsed options.
// Returns false if a runtime already exists or if initialization fails.
bool Runtime::Create(RuntimeArgumentMap&& runtime_options) {
  // TODO: acquire a static mutex on Runtime to avoid racing.
  if (Runtime::instance_ != nullptr) {
    return false;
  }
  instance_ = new Runtime;
  // From here on, Locks can consult the runtime to decide whether Abort is safe.
  Locks::SetClientCallback(IsSafeToCallAbort);
  if (!instance_->Init(std::move(runtime_options))) {
    // TODO: Currently deleting the instance will abort the runtime on destruction. Now this will
    // leak memory, instead. Fix the destructor. b/19100793.
    // delete instance_;
    instance_ = nullptr;
    return false;
  }
  return true;
}
616
617bool Runtime::Create(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
618  RuntimeArgumentMap runtime_options;
619  return ParseOptions(raw_options, ignore_unrecognized, &runtime_options) &&
620      Create(std::move(runtime_options));
621}
622
// Calls ClassLoader.getSystemClassLoader() and installs the result as the
// current thread's class-loader override and its contextClassLoader field.
// Returns a new global reference to the loader, or nullptr when AOT-compiling
// anything other than the boot image.
static jobject CreateSystemClassLoader(Runtime* runtime) {
  if (runtime->IsAotCompiler() && !runtime->GetCompilerCallbacks()->IsBootImage()) {
    return nullptr;
  }

  ScopedObjectAccess soa(Thread::Current());
  ClassLinker* cl = Runtime::Current()->GetClassLinker();
  auto pointer_size = cl->GetImagePointerSize();

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> class_loader_class(
      hs.NewHandle(soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ClassLoader)));
  CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true));

  ArtMethod* getSystemClassLoader = class_loader_class->FindClassMethod(
      "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size);
  CHECK(getSystemClassLoader != nullptr);
  CHECK(getSystemClassLoader->IsStatic());

  // Static method: no receiver, no arguments.
  JValue result = InvokeWithJValues(soa,
                                    nullptr,
                                    jni::EncodeArtMethod(getSystemClassLoader),
                                    nullptr);
  JNIEnv* env = soa.Self()->GetJniEnv();
  ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL()));
  CHECK(system_class_loader.get() != nullptr);

  soa.Self()->SetClassLoaderOverride(system_class_loader.get());

  Handle<mirror::Class> thread_class(
      hs.NewHandle(soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread)));
  CHECK(cl->EnsureInitialized(soa.Self(), thread_class, true, true));

  ArtField* contextClassLoader =
      thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
  CHECK(contextClassLoader != nullptr);

  // We can't run in a transaction yet.
  contextClassLoader->SetObject<false>(
      soa.Self()->GetPeer(),
      soa.Decode<mirror::ClassLoader>(system_class_loader.get()).Ptr());

  return env->NewGlobalRef(system_class_loader.get());
}
667
668std::string Runtime::GetPatchoatExecutable() const {
669  if (!patchoat_executable_.empty()) {
670    return patchoat_executable_;
671  }
672  std::string patchoat_executable(GetAndroidRoot());
673  patchoat_executable += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
674  return patchoat_executable;
675}
676
677std::string Runtime::GetCompilerExecutable() const {
678  if (!compiler_executable_.empty()) {
679    return compiler_executable_;
680  }
681  std::string compiler_executable(GetAndroidRoot());
682  compiler_executable += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
683  return compiler_executable;
684}
685
// Transitions the runtime from "created" to "started": moves the main thread to
// kNative, initializes native methods and thread groups, optionally creates the
// JIT, creates the system class loader, fires the start/init phase callbacks,
// starts daemon threads, and optionally kicks off method tracing. Returns true.
bool Runtime::Start() {
  VLOG(startup) << "Runtime::Start entering";

  CHECK(!no_sig_chain_) << "A started runtime should have sig chain enabled";

  // If a debug host build, disable ptrace restriction for debugging and test timeout thread dump.
  // Only 64-bit as prctl() may fail in 32 bit userspace on a 64-bit kernel.
#if defined(__linux__) && !defined(ART_TARGET_ANDROID) && defined(__x86_64__)
  if (kIsDebugBuild) {
    CHECK_EQ(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY), 0);
  }
#endif

  // Restore main thread state to kNative as expected by native code.
  Thread* self = Thread::Current();

  self->TransitionFromRunnableToSuspended(kNative);

  started_ = true;

  if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
    ScopedObjectAccess soa(self);
    StackHandleScope<2> hs(soa.Self());

    auto class_class(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
    auto field_class(hs.NewHandle<mirror::Class>(mirror::Field::StaticClass()));

    class_linker_->EnsureInitialized(soa.Self(), class_class, true, true);
    // Field class is needed for register_java_net_InetAddress in libcore, b/28153851.
    class_linker_->EnsureInitialized(soa.Self(), field_class, true, true);
  }

  // InitNativeMethods needs to be after started_ so that the classes
  // it touches will have methods linked to the oat file if necessary.
  {
    ScopedTrace trace2("InitNativeMethods");
    InitNativeMethods();
  }

  // Initialize well known thread group values that may be accessed by threads while attaching.
  InitThreadGroups(self);

  Thread::FinishStartup();

  // Create the JIT either if we have to use JIT compilation or save profiling info. This is
  // done after FinishStartup as the JIT pool needs Java thread peers, which require the main
  // ThreadGroup to exist.
  //
  // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
  // recoding profiles. Maybe we should consider changing the name to be more clear it's
  // not only about compiling. b/28295073.
  if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
    std::string error_msg;
    if (!IsZygote()) {
      // Not the zygote, so the JIT code cache can be created immediately. (The zygote must
      // wait until after forking, due to SELinux restrictions on r/w/x memory regions.)
      CreateJit();
    } else if (jit_options_->UseJitCompilation()) {
      if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
        // Try to load compiler pre zygote to reduce PSS. b/27744947
        LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
      }
    }
  }

  // Send the start phase event. We have to wait till here as this is when the main thread peer
  // has just been generated, important root clinits have been run and JNI is completely functional.
  {
    ScopedObjectAccess soa(self);
    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kStart);
  }

  system_class_loader_ = CreateSystemClassLoader(this);

  if (!is_zygote_) {
    if (is_native_bridge_loaded_) {
      PreInitializeNativeBridge(".");
    }
    NativeBridgeAction action = force_native_bridge_
        ? NativeBridgeAction::kInitialize
        : NativeBridgeAction::kUnload;
    InitNonZygoteOrPostFork(self->GetJniEnv(),
                            /* is_system_server */ false,
                            action,
                            GetInstructionSetString(kRuntimeISA));
  }

  // Send the initialized phase event. Send it before starting daemons, as otherwise
  // sending thread events becomes complicated.
  {
    ScopedObjectAccess soa(self);
    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInit);
  }

  StartDaemonThreads();

  {
    ScopedObjectAccess soa(self);
    self->GetJniEnv()->locals.AssertEmpty();
  }

  VLOG(startup) << "Runtime::Start exiting";
  finished_starting_ = true;

  if (trace_config_.get() != nullptr && trace_config_->trace_file != "") {
    ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart);
    Trace::Start(trace_config_->trace_file.c_str(),
                 -1,
                 static_cast<int>(trace_config_->trace_file_size),
                 0,
                 trace_config_->trace_output_mode,
                 trace_config_->trace_mode,
                 0);
  }

  return true;
}
803
// Marks a thread's birth as complete. If a shutdown has started and this was
// the last thread being born, wakes the waiter blocked on shutdown_cond_.
void Runtime::EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
  DCHECK_GT(threads_being_born_, 0U);
  threads_being_born_--;
  if (shutting_down_started_ && threads_being_born_ == 0) {
    shutdown_cond_->Broadcast(Thread::Current());
  }
}
811
// Post-fork (or standalone, non-zygote) initialization: resolves native bridge
// state, recreates heap thread pools, resets GC stats, conditionally creates
// the JIT, and starts the signal catcher and JDWP threads.
void Runtime::InitNonZygoteOrPostFork(
    JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa) {
  is_zygote_ = false;

  if (is_native_bridge_loaded_) {
    switch (action) {
      case NativeBridgeAction::kUnload:
        UnloadNativeBridge();
        is_native_bridge_loaded_ = false;
        break;

      case NativeBridgeAction::kInitialize:
        InitializeNativeBridge(env, isa);
        break;
    }
  }

  // Create the thread pools.
  heap_->CreateThreadPool();
  // Reset the gc performance data at zygote fork so that the GCs
  // before fork aren't attributed to an app.
  heap_->ResetGcPerformanceInfo();

  // We may want to collect profiling samples for system server, but we never want to JIT there.
  if ((!is_system_server || !jit_options_->UseJitCompilation()) &&
      !safe_mode_ &&
      (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
      jit_ == nullptr) {
    // Note that when running ART standalone (not zygote, nor zygote fork),
    // the jit may have already been created.
    CreateJit();
  }

  StartSignalCatcher();

  // Start the JDWP thread. If the command-line debugger flags specified "suspend=y",
  // this will pause the runtime, so we probably want this to come last.
  Dbg::StartJdwp();
}
851
852void Runtime::StartSignalCatcher() {
853  if (!is_zygote_) {
854    signal_catcher_ = new SignalCatcher(stack_trace_file_, use_tombstoned_traces_);
855  }
856}
857
// Thread-safe query of the shutdown flag: takes runtime_shutdown_lock_ so the
// answer is coherent with the thread-birth/shutdown bookkeeping, then
// delegates to the locked variant.
bool Runtime::IsShuttingDown(Thread* self) {
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  return IsShuttingDownLocked();
}
862
863void Runtime::StartDaemonThreads() {
864  ScopedTrace trace(__FUNCTION__);
865  VLOG(startup) << "Runtime::StartDaemonThreads entering";
866
867  Thread* self = Thread::Current();
868
869  // Must be in the kNative state for calling native methods.
870  CHECK_EQ(self->GetState(), kNative);
871
872  JNIEnv* env = self->GetJniEnv();
873  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
874                            WellKnownClasses::java_lang_Daemons_start);
875  if (env->ExceptionCheck()) {
876    env->ExceptionDescribe();
877    LOG(FATAL) << "Error starting java.lang.Daemons";
878  }
879
880  VLOG(startup) << "Runtime::StartDaemonThreads exiting";
881}
882
883// Attempts to open dex files from image(s). Given the image location, try to find the oat file
884// and open it to get the stored dex file. If the image is the first for a multi-image boot
885// classpath, go on and also open the other images.
// Attempts to open all dex files reachable from the given boot image
// location, appending them to *dex_files and counting unopenable entries in
// *failures. Returns true only if every image in the (possibly multi-image)
// boot classpath had a usable system image, vdex, ELF and oat file; any
// missing piece makes the whole attempt fail so the caller can fall back to
// opening dex files directly.
static bool OpenDexFilesFromImage(const std::string& image_location,
                                  std::vector<std::unique_ptr<const DexFile>>* dex_files,
                                  size_t* failures) {
  DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";

  // Use a work-list approach, so that we can easily reuse the opening code.
  // The first iteration may append the remaining images of a multi-image
  // boot classpath (see the index == 0 handling below).
  std::vector<std::string> image_locations;
  image_locations.push_back(image_location);

  for (size_t index = 0; index < image_locations.size(); ++index) {
    std::string system_filename;
    bool has_system = false;
    std::string cache_filename_unused;
    bool dalvik_cache_exists_unused;
    bool has_cache_unused;
    bool is_global_cache_unused;
    bool found_image = gc::space::ImageSpace::FindImageFilename(image_locations[index].c_str(),
                                                                kRuntimeISA,
                                                                &system_filename,
                                                                &has_system,
                                                                &cache_filename_unused,
                                                                &dalvik_cache_exists_unused,
                                                                &has_cache_unused,
                                                                &is_global_cache_unused);

    // Only the system image partition is acceptable here; a cache-only image
    // is not used by this path.
    if (!found_image || !has_system) {
      return false;
    }

    // We are falling back to non-executable use of the oat file because patching failed, presumably
    // due to lack of space.
    std::string vdex_filename =
        ImageHeader::GetVdexLocationFromImageLocation(system_filename.c_str());
    std::string oat_filename =
        ImageHeader::GetOatLocationFromImageLocation(system_filename.c_str());
    std::string oat_location =
        ImageHeader::GetOatLocationFromImageLocation(image_locations[index].c_str());
    // Note: in the multi-image case, the image location may end in ".jar," and not ".art." Handle
    //       that here.
    if (android::base::EndsWith(oat_location, ".jar")) {
      oat_location.replace(oat_location.length() - 3, 3, "oat");
    }
    std::string error_msg;

    std::unique_ptr<VdexFile> vdex_file(VdexFile::Open(vdex_filename,
                                                       false /* writable */,
                                                       false /* low_4gb */,
                                                       false, /* unquicken */
                                                       &error_msg));
    if (vdex_file.get() == nullptr) {
      return false;
    }

    std::unique_ptr<File> file(OS::OpenFileForReading(oat_filename.c_str()));
    if (file.get() == nullptr) {
      return false;
    }
    std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(),
                                                    false /* writable */,
                                                    false /* program_header_only */,
                                                    false /* low_4gb */,
                                                    &error_msg));
    if (elf_file.get() == nullptr) {
      return false;
    }
    // Ownership of the elf and vdex files transfers to the OatFile.
    std::unique_ptr<const OatFile> oat_file(
        OatFile::OpenWithElfFile(elf_file.release(),
                                 vdex_file.release(),
                                 oat_location,
                                 nullptr,
                                 &error_msg));
    if (oat_file == nullptr) {
      LOG(WARNING) << "Unable to use '" << oat_filename << "' because " << error_msg;
      return false;
    }

    // Collect the dex files stored in the oat file; individual failures are
    // counted but do not abort the whole operation.
    for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
      if (oat_dex_file == nullptr) {
        *failures += 1;
        continue;
      }
      std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
      if (dex_file.get() == nullptr) {
        *failures += 1;
      } else {
        dex_files->push_back(std::move(dex_file));
      }
    }

    if (index == 0) {
      // First file. See if this is a multi-image environment, and if so, enqueue the other images.
      const OatHeader& boot_oat_header = oat_file->GetOatHeader();
      const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
      if (boot_cp != nullptr) {
        gc::space::ImageSpace::ExtractMultiImageLocations(image_locations[0],
                                                          boot_cp,
                                                          &image_locations);
      }
    }

    // Register the oat file with the runtime so it stays alive and is findable.
    Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
  }
  return true;
}
990
991
992static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
993                           const std::vector<std::string>& dex_locations,
994                           const std::string& image_location,
995                           std::vector<std::unique_ptr<const DexFile>>* dex_files) {
996  DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
997  size_t failure_count = 0;
998  if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
999    return failure_count;
1000  }
1001  failure_count = 0;
1002  for (size_t i = 0; i < dex_filenames.size(); i++) {
1003    const char* dex_filename = dex_filenames[i].c_str();
1004    const char* dex_location = dex_locations[i].c_str();
1005    static constexpr bool kVerifyChecksum = true;
1006    std::string error_msg;
1007    if (!OS::FileExists(dex_filename)) {
1008      LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
1009      continue;
1010    }
1011    if (!DexFile::Open(dex_filename, dex_location, kVerifyChecksum, &error_msg, dex_files)) {
1012      LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
1013      ++failure_count;
1014    }
1015  }
1016  return failure_count;
1017}
1018
// Installs the runtime's sentinel object exactly once. The sentinel must be
// non-null and non-movable (its address is relied upon as a stable marker).
void Runtime::SetSentinel(mirror::Object* sentinel) {
  CHECK(sentinel_.Read() == nullptr);  // Set-once.
  CHECK(sentinel != nullptr);
  CHECK(!heap_->IsMovableObject(sentinel));
  sentinel_ = GcRoot<mirror::Object>(sentinel);
}
1025
// Performs the bulk of runtime initialization from the parsed options:
// memory maps, heap, JNI (JavaVMExt), class linker (from a boot image or from
// boot-classpath dex files), signal/fault handlers, tracing config, plugins
// and startup agents. Returns false (with an error already logged) on
// failure. Order of the steps below is significant.
bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
  // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc.
  // Take a snapshot of the environment at the time the runtime was created, for use by Exec, etc.
  env_snapshot_.TakeSnapshot();

  RuntimeArgumentMap runtime_options(std::move(runtime_options_in));
  ScopedTrace trace(__FUNCTION__);
  CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);

  MemMap::Init();

  // Try to reserve a dedicated fault page. This is allocated for clobbered registers and sentinels.
  // If we cannot reserve it, log a warning.
  // Note: We allocate this first to have a good chance of grabbing the page. The address (0xebad..)
  //       is out-of-the-way enough that it should not collide with boot image mapping.
  // Note: Don't request an error message. That will lead to a maps dump in the case of failure,
  //       leading to logspam.
  {
    constexpr uintptr_t kSentinelAddr =
        RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
    protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
                                                     reinterpret_cast<uint8_t*>(kSentinelAddr),
                                                     kPageSize,
                                                     PROT_NONE,
                                                     /* low_4g */ true,
                                                     /* reuse */ false,
                                                     /* error_msg */ nullptr));
    if (protected_fault_page_ == nullptr) {
      LOG(WARNING) << "Could not reserve sentinel fault page";
    } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
      // Got a page, but not at the requested address; it is useless as a
      // sentinel, so release it.
      LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
      protected_fault_page_.reset();
    }
  }

  using Opt = RuntimeArgumentMap;
  VLOG(startup) << "Runtime::Init -verbose:startup enabled";

  QuasiAtomic::Startup();

  oat_file_manager_ = new OatFileManager;

  Thread::SetSensitiveThreadHook(runtime_options.GetOrDefault(Opt::HookIsSensitiveThread));
  Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
                runtime_options.GetOrDefault(Opt::StackDumpLockProfThreshold));

  // Copy the simple scalar/string options into runtime fields.
  boot_class_path_string_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
  class_path_string_ = runtime_options.ReleaseOrDefault(Opt::ClassPath);
  properties_ = runtime_options.ReleaseOrDefault(Opt::PropertiesList);

  compiler_callbacks_ = runtime_options.GetOrDefault(Opt::CompilerCallbacksPtr);
  patchoat_executable_ = runtime_options.ReleaseOrDefault(Opt::PatchOat);
  must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
  is_zygote_ = runtime_options.Exists(Opt::Zygote);
  is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
  dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::Dex2Oat);
  image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
  dump_native_stack_on_sig_quit_ = runtime_options.GetOrDefault(Opt::DumpNativeStackOnSigQuit);

  vfprintf_ = runtime_options.GetOrDefault(Opt::HookVfprintf);
  exit_ = runtime_options.GetOrDefault(Opt::HookExit);
  abort_ = runtime_options.GetOrDefault(Opt::HookAbort);

  default_stack_size_ = runtime_options.GetOrDefault(Opt::StackSize);
  use_tombstoned_traces_ = runtime_options.GetOrDefault(Opt::UseTombstonedTraces);
#if !defined(ART_TARGET_ANDROID)
  CHECK(!use_tombstoned_traces_)
      << "-Xusetombstonedtraces is only supported in an Android environment";
#endif
  stack_trace_file_ = runtime_options.ReleaseOrDefault(Opt::StackTraceFile);

  compiler_executable_ = runtime_options.ReleaseOrDefault(Opt::Compiler);
  compiler_options_ = runtime_options.ReleaseOrDefault(Opt::CompilerOptions);
  // A --debuggable compiler option makes the whole runtime Java-debuggable.
  for (StringPiece option : Runtime::Current()->GetCompilerOptions()) {
    if (option.starts_with("--debuggable")) {
      SetJavaDebuggable(true);
      break;
    }
  }
  image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
  image_location_ = runtime_options.GetOrDefault(Opt::Image);

  max_spins_before_thin_lock_inflation_ =
      runtime_options.GetOrDefault(Opt::MaxSpinsBeforeThinLockInflation);

  // Core runtime singletons.
  monitor_list_ = new MonitorList;
  monitor_pool_ = MonitorPool::Create();
  thread_list_ = new ThreadList(runtime_options.GetOrDefault(Opt::ThreadSuspendTimeout));
  intern_table_ = new InternTable;

  verify_ = runtime_options.GetOrDefault(Opt::Verify);
  allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);

  no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
  force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);

  Split(runtime_options.GetOrDefault(Opt::CpuAbiList), ',', &cpu_abilist_);

  fingerprint_ = runtime_options.ReleaseOrDefault(Opt::Fingerprint);

  if (runtime_options.GetOrDefault(Opt::Interpret)) {
    GetInstrumentation()->ForceInterpretOnly();
  }

  zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
  experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
  is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);

  plugins_ = runtime_options.ReleaseOrDefault(Opt::Plugins);
  agents_ = runtime_options.ReleaseOrDefault(Opt::AgentPath);
  // TODO Add back in -agentlib
  // for (auto lib : runtime_options.ReleaseOrDefault(Opt::AgentLib)) {
  //   agents_.push_back(lib);
  // }

  // Construct the heap with all GC-related options. When read barriers are
  // compiled in, the collector type is forced to concurrent copying.
  XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
  heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
                       runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
                       runtime_options.GetOrDefault(Opt::HeapMinFree),
                       runtime_options.GetOrDefault(Opt::HeapMaxFree),
                       runtime_options.GetOrDefault(Opt::HeapTargetUtilization),
                       runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier),
                       runtime_options.GetOrDefault(Opt::MemoryMaximumSize),
                       runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
                       runtime_options.GetOrDefault(Opt::Image),
                       runtime_options.GetOrDefault(Opt::ImageInstructionSet),
                       // Override the collector type to CC if the read barrier config.
                       kUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
                       kUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
                                       : runtime_options.GetOrDefault(Opt::BackgroundGc),
                       runtime_options.GetOrDefault(Opt::LargeObjectSpace),
                       runtime_options.GetOrDefault(Opt::LargeObjectThreshold),
                       runtime_options.GetOrDefault(Opt::ParallelGCThreads),
                       runtime_options.GetOrDefault(Opt::ConcGCThreads),
                       runtime_options.Exists(Opt::LowMemoryMode),
                       runtime_options.GetOrDefault(Opt::LongPauseLogThreshold),
                       runtime_options.GetOrDefault(Opt::LongGCLogThreshold),
                       runtime_options.Exists(Opt::IgnoreMaxFootprint),
                       runtime_options.GetOrDefault(Opt::UseTLAB),
                       xgc_option.verify_pre_gc_heap_,
                       xgc_option.verify_pre_sweeping_heap_,
                       xgc_option.verify_post_gc_heap_,
                       xgc_option.verify_pre_gc_rosalloc_,
                       xgc_option.verify_pre_sweeping_rosalloc_,
                       xgc_option.verify_post_gc_rosalloc_,
                       xgc_option.gcstress_,
                       xgc_option.measure_,
                       runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
                       runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));

  if (!heap_->HasBootImageSpace() && !allow_dex_file_fallback_) {
    LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
    return false;
  }

  dump_gc_performance_on_shutdown_ = runtime_options.Exists(Opt::DumpGCPerformanceOnShutdown);

  if (runtime_options.Exists(Opt::JdwpOptions)) {
    Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
  }
  callbacks_->AddThreadLifecycleCallback(Dbg::GetThreadLifecycleCallback());
  callbacks_->AddClassLoadCallback(Dbg::GetClassLoadCallback());

  jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
  if (IsAotCompiler()) {
    // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
    // this case.
    // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
    // null and we don't create the jit.
    jit_options_->SetUseJitCompilation(false);
    jit_options_->SetSaveProfilingInfo(false);
  }

  // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
  // can't be trimmed as easily.
  const bool use_malloc = IsAotCompiler();
  arena_pool_.reset(new ArenaPool(use_malloc, /* low_4gb */ false));
  jit_arena_pool_.reset(
      new ArenaPool(/* use_malloc */ false, /* low_4gb */ false, "CompilerMetadata"));

  if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
    // 4gb, no malloc. Explanation in header.
    low_4gb_arena_pool_.reset(new ArenaPool(/* use_malloc */ false, /* low_4gb */ true));
  }
  linear_alloc_.reset(CreateLinearAlloc());

  BlockSignals();
  InitPlatformSignalHandlers();

  // Change the implicit checks flags based on runtime architecture.
  switch (kRuntimeISA) {
    case kArm:
    case kThumb2:
    case kX86:
    case kArm64:
    case kX86_64:
    case kMips:
    case kMips64:
      implicit_null_checks_ = true;
      // Installing stack protection does not play well with valgrind.
      implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind);
      break;
    default:
      // Keep the defaults.
      break;
  }

  if (!no_sig_chain_) {
    // Dex2Oat's Runtime does not need the signal chain or the fault handler.
    if (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_) {
      fault_manager.Init();

      // These need to be in a specific order.  The null point check handler must be
      // after the suspend check and stack overflow check handlers.
      //
      // Note: the instances attach themselves to the fault manager and are handled by it. The manager
      //       will delete the instance on Shutdown().
      if (implicit_suspend_checks_) {
        new SuspensionHandler(&fault_manager);
      }

      if (implicit_so_checks_) {
        new StackOverflowHandler(&fault_manager);
      }

      if (implicit_null_checks_) {
        new NullPointerHandler(&fault_manager);
      }

      if (kEnableJavaStackTraceHandler) {
        new JavaStackTraceHandler(&fault_manager);
      }
    }
  }

  // Bring up JNI before attaching the main thread.
  std::string error_msg;
  java_vm_ = JavaVMExt::Create(this, runtime_options, &error_msg);
  if (java_vm_.get() == nullptr) {
    LOG(ERROR) << "Could not initialize JavaVMExt: " << error_msg;
    return false;
  }

  // Add the JniEnv handler.
  // TODO Refactor this stuff.
  java_vm_->AddEnvironmentHook(JNIEnvExt::GetEnvHandler);

  Thread::Startup();

  // ClassLinker needs an attached thread, but we can't fully attach a thread without creating
  // objects. We can't supply a thread group yet; it will be fixed later. Since we are the main
  // thread, we do not get a java peer.
  Thread* self = Thread::Attach("main", false, nullptr, false);
  CHECK_EQ(self->GetThreadId(), ThreadList::kMainThreadId);
  CHECK(self != nullptr);

  self->SetCanCallIntoJava(!IsAotCompiler());

  // Set us to runnable so tools using a runtime can allocate and GC by default
  self->TransitionFromSuspendedToRunnable();

  // Now we're attached, we can take the heap locks and validate the heap.
  GetHeap()->EnableObjectValidation();

  CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
  if (UNLIKELY(IsAotCompiler())) {
    class_linker_ = new AotClassLinker(intern_table_);
  } else {
    class_linker_ = new ClassLinker(intern_table_);
  }
  if (GetHeap()->HasBootImageSpace()) {
    // Fast path: initialize the class linker from the boot image.
    bool result = class_linker_->InitFromBootImage(&error_msg);
    if (!result) {
      LOG(ERROR) << "Could not initialize from image: " << error_msg;
      return false;
    }
    if (kIsDebugBuild) {
      for (auto image_space : GetHeap()->GetBootImageSpaces()) {
        image_space->VerifyImageAllocations();
      }
    }
    if (boot_class_path_string_.empty()) {
      // The bootclasspath is not explicitly specified: construct it from the loaded dex files.
      const std::vector<const DexFile*>& boot_class_path = GetClassLinker()->GetBootClassPath();
      std::vector<std::string> dex_locations;
      dex_locations.reserve(boot_class_path.size());
      for (const DexFile* dex_file : boot_class_path) {
        dex_locations.push_back(dex_file->GetLocation());
      }
      boot_class_path_string_ = android::base::Join(dex_locations, ':');
    }
    {
      ScopedTrace trace2("AddImageStringsToTable");
      GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
    }
    if (IsJavaDebuggable()) {
      // Now that we have loaded the boot image, deoptimize its methods if we are running
      // debuggable, as the code may have been compiled non-debuggable.
      DeoptimizeBootImage();
    }
  } else {
    // Slow path: no boot image, open the boot classpath dex files directly.
    std::vector<std::string> dex_filenames;
    Split(boot_class_path_string_, ':', &dex_filenames);

    std::vector<std::string> dex_locations;
    if (!runtime_options.Exists(Opt::BootClassPathLocations)) {
      dex_locations = dex_filenames;
    } else {
      dex_locations = runtime_options.GetOrDefault(Opt::BootClassPathLocations);
      CHECK_EQ(dex_filenames.size(), dex_locations.size());
    }

    std::vector<std::unique_ptr<const DexFile>> boot_class_path;
    if (runtime_options.Exists(Opt::BootClassPathDexList)) {
      boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
    } else {
      OpenDexFiles(dex_filenames,
                   dex_locations,
                   runtime_options.GetOrDefault(Opt::Image),
                   &boot_class_path);
    }
    instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
    if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
      LOG(ERROR) << "Could not initialize without image: " << error_msg;
      return false;
    }

    // TODO: Should we move the following to InitWithoutImage?
    SetInstructionSet(instruction_set_);
    for (uint32_t i = 0; i < kCalleeSaveSize; i++) {
      CalleeSaveType type = CalleeSaveType(i);
      if (!HasCalleeSaveMethod(type)) {
        SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
      }
    }
  }

  CHECK(class_linker_ != nullptr);

  verifier::MethodVerifier::Init();

  // Record the method-trace configuration; tracing itself is started later
  // (see the trace_config_ check near the end of Runtime::Start).
  if (runtime_options.Exists(Opt::MethodTrace)) {
    trace_config_.reset(new TraceConfig());
    trace_config_->trace_file = runtime_options.ReleaseOrDefault(Opt::MethodTraceFile);
    trace_config_->trace_file_size = runtime_options.ReleaseOrDefault(Opt::MethodTraceFileSize);
    trace_config_->trace_mode = Trace::TraceMode::kMethodTracing;
    trace_config_->trace_output_mode = runtime_options.Exists(Opt::MethodTraceStreaming) ?
        Trace::TraceOutputMode::kStreaming :
        Trace::TraceOutputMode::kFile;
  }

  // TODO: move this to just be an Trace::Start argument
  Trace::SetDefaultClockSource(runtime_options.GetOrDefault(Opt::ProfileClock));

  // Pre-allocate an OutOfMemoryError for the double-OOME case.
  self->ThrowNewException("Ljava/lang/OutOfMemoryError;",
                          "OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
                          "no stack trace available");
  pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException());
  self->ClearException();

  // Pre-allocate a NoClassDefFoundError for the common case of failing to find a system class
  // ahead of checking the application's class loader.
  self->ThrowNewException("Ljava/lang/NoClassDefFoundError;",
                          "Class not found using the boot class loader; no stack trace available");
  pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException());
  self->ClearException();

  // Runtime initialization is largely done now.
  // We load plugins first since that can modify the runtime state slightly.
  // Load all plugins
  for (auto& plugin : plugins_) {
    std::string err;
    if (!plugin.Load(&err)) {
      LOG(FATAL) << plugin << " failed to load: " << err;
    }
  }

  // Look for a native bridge.
  //
  // The intended flow here is, in the case of a running system:
  //
  // Runtime::Init() (zygote):
  //   LoadNativeBridge -> dlopen from cmd line parameter.
  //  |
  //  V
  // Runtime::Start() (zygote):
  //   No-op wrt native bridge.
  //  |
  //  | start app
  //  V
  // DidForkFromZygote(action)
  //   action = kUnload -> dlclose native bridge.
  //   action = kInitialize -> initialize library
  //
  //
  // The intended flow here is, in the case of a simple dalvikvm call:
  //
  // Runtime::Init():
  //   LoadNativeBridge -> dlopen from cmd line parameter.
  //  |
  //  V
  // Runtime::Start():
  //   DidForkFromZygote(kInitialize) -> try to initialize any native bridge given.
  //   No-op wrt native bridge.
  {
    std::string native_bridge_file_name = runtime_options.ReleaseOrDefault(Opt::NativeBridge);
    is_native_bridge_loaded_ = LoadNativeBridge(native_bridge_file_name);
  }

  // Startup agents
  // TODO Maybe we should start a new thread to run these on. Investigate RI behavior more.
  for (auto& agent : agents_) {
    // TODO Check err
    int res = 0;
    std::string err = "";
    ti::Agent::LoadError result = agent.Load(&res, &err);
    if (result == ti::Agent::kInitializationError) {
      LOG(FATAL) << "Unable to initialize agent!";
    } else if (result != ti::Agent::kNoError) {
      LOG(ERROR) << "Unable to load an agent: " << err;
    }
  }
  {
    ScopedObjectAccess soa(self);
    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInitialAgents);
  }

  VLOG(startup) << "Runtime::Init exiting";

  return true;
}
1457
1458static bool EnsureJvmtiPlugin(Runtime* runtime,
1459                              std::vector<Plugin>* plugins,
1460                              std::string* error_msg) {
1461  constexpr const char* plugin_name = kIsDebugBuild ? "libopenjdkjvmtid.so" : "libopenjdkjvmti.so";
1462
1463  // Is the plugin already loaded?
1464  for (const Plugin& p : *plugins) {
1465    if (p.GetLibrary() == plugin_name) {
1466      return true;
1467    }
1468  }
1469
1470  // Is the process debuggable? Otherwise, do not attempt to load the plugin.
1471  if (!runtime->IsJavaDebuggable()) {
1472    *error_msg = "Process is not debuggable.";
1473    return false;
1474  }
1475
1476  Plugin new_plugin = Plugin::Create(plugin_name);
1477
1478  if (!new_plugin.Load(error_msg)) {
1479    return false;
1480  }
1481
1482  plugins->push_back(std::move(new_plugin));
1483  return true;
1484}
1485
1486// Attach a new agent and add it to the list of runtime agents
1487//
1488// TODO: once we decide on the threading model for agents,
1489//   revisit this and make sure we're doing this on the right thread
1490//   (and we synchronize access to any shared data structures like "agents_")
1491//
1492void Runtime::AttachAgent(const std::string& agent_arg) {
1493  std::string error_msg;
1494  if (!EnsureJvmtiPlugin(this, &plugins_, &error_msg)) {
1495    LOG(WARNING) << "Could not load plugin: " << error_msg;
1496    ScopedObjectAccess soa(Thread::Current());
1497    ThrowIOException("%s", error_msg.c_str());
1498    return;
1499  }
1500
1501  ti::Agent agent(agent_arg);
1502
1503  int res = 0;
1504  ti::Agent::LoadError result = agent.Attach(&res, &error_msg);
1505
1506  if (result == ti::Agent::kNoError) {
1507    agents_.push_back(std::move(agent));
1508  } else {
1509    LOG(WARNING) << "Agent attach failed (result=" << result << ") : " << error_msg;
1510    ScopedObjectAccess soa(Thread::Current());
1511    ThrowIOException("%s", error_msg.c_str());
1512  }
1513}
1514
// Registers the runtime's built-in native methods and bootstraps the core
// native libraries (libjavacore / libopenjdk). The ordering of the steps
// below is significant and must not be changed casually.
void Runtime::InitNativeMethods() {
  VLOG(startup) << "Runtime::InitNativeMethods entering";
  Thread* self = Thread::Current();
  JNIEnv* env = self->GetJniEnv();

  // Must be in the kNative state for calling native methods (JNI_OnLoad code).
  CHECK_EQ(self->GetState(), kNative);

  // First set up JniConstants, which is used by both the runtime's built-in native
  // methods and libcore.
  JniConstants::init(env);

  // Then set up the native methods provided by the runtime itself.
  RegisterRuntimeNativeMethods(env);

  // Initialize classes used in JNI. The initialization requires runtime native
  // methods to be loaded first.
  WellKnownClasses::Init(env);

  // Then set up libjavacore / libopenjdk, which are just a regular JNI libraries with
  // a regular JNI_OnLoad. Most JNI libraries can just use System.loadLibrary, but
  // libcore can't because it's the library that implements System.loadLibrary!
  {
    std::string error_msg;
    if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, nullptr, &error_msg)) {
      LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << error_msg;
    }
  }
  {
    // Like JVMTI above, debug builds load the debug variant of libopenjdk.
    constexpr const char* kOpenJdkLibrary = kIsDebugBuild
                                                ? "libopenjdkd.so"
                                                : "libopenjdk.so";
    std::string error_msg;
    if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr, nullptr, &error_msg)) {
      LOG(FATAL) << "LoadNativeLibrary failed for \"" << kOpenJdkLibrary << "\": " << error_msg;
    }
  }

  // Initialize well known classes that may invoke runtime native methods.
  WellKnownClasses::LateInit(env);

  VLOG(startup) << "Runtime::InitNativeMethods exiting";
}
1558
// Asks the arena pool to release unused memory (the pool takes its own lock).
void Runtime::ReclaimArenaPoolMemory() {
  arena_pool_->LockReclaimMemory();
}
1562
1563void Runtime::InitThreadGroups(Thread* self) {
1564  JNIEnvExt* env = self->GetJniEnv();
1565  ScopedJniEnvLocalRefState env_state(env);
1566  main_thread_group_ =
1567      env->NewGlobalRef(env->GetStaticObjectField(
1568          WellKnownClasses::java_lang_ThreadGroup,
1569          WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
1570  CHECK(main_thread_group_ != nullptr || IsAotCompiler());
1571  system_thread_group_ =
1572      env->NewGlobalRef(env->GetStaticObjectField(
1573          WellKnownClasses::java_lang_ThreadGroup,
1574          WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
1575  CHECK(system_thread_group_ != nullptr || IsAotCompiler());
1576}
1577
// Returns the cached global ref to the main ThreadGroup (may be null only
// when running as an AOT compiler).
jobject Runtime::GetMainThreadGroup() const {
  CHECK(main_thread_group_ != nullptr || IsAotCompiler());
  return main_thread_group_;
}
1582
// Returns the cached global ref to the system ThreadGroup (may be null only
// when running as an AOT compiler).
jobject Runtime::GetSystemThreadGroup() const {
  CHECK(system_thread_group_ != nullptr || IsAotCompiler());
  return system_thread_group_;
}
1587
// Returns the system class loader (may be null only when running as an AOT
// compiler).
jobject Runtime::GetSystemClassLoader() const {
  CHECK(system_class_loader_ != nullptr || IsAotCompiler());
  return system_class_loader_;
}
1592
// Registers the JNI native method implementations that the runtime itself
// provides for core libraries (dalvik.*, java.lang.*, libcore, ddmc, Unsafe).
void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
  register_dalvik_system_DexFile(env);
  register_dalvik_system_VMDebug(env);
  register_dalvik_system_VMRuntime(env);
  register_dalvik_system_VMStack(env);
  register_dalvik_system_ZygoteHooks(env);
  register_java_lang_Class(env);
  register_java_lang_Object(env);
  register_java_lang_invoke_MethodHandleImpl(env);
  register_java_lang_ref_FinalizerReference(env);
  register_java_lang_reflect_Array(env);
  register_java_lang_reflect_Constructor(env);
  register_java_lang_reflect_Executable(env);
  register_java_lang_reflect_Field(env);
  register_java_lang_reflect_Method(env);
  register_java_lang_reflect_Parameter(env);
  register_java_lang_reflect_Proxy(env);
  register_java_lang_ref_Reference(env);
  register_java_lang_String(env);
  register_java_lang_StringFactory(env);
  register_java_lang_System(env);
  register_java_lang_Thread(env);
  register_java_lang_Throwable(env);
  register_java_lang_VMClassLoader(env);
  register_java_lang_Void(env);
  register_java_util_concurrent_atomic_AtomicLong(env);
  register_libcore_util_CharsetUtils(env);
  register_org_apache_harmony_dalvik_ddmc_DdmServer(env);
  register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(env);
  register_sun_misc_Unsafe(env);
}
1624
1625std::ostream& operator<<(std::ostream& os, const DeoptimizationKind& kind) {
1626  os << GetDeoptimizationKindName(kind);
1627  return os;
1628}
1629
1630void Runtime::DumpDeoptimizations(std::ostream& os) {
1631  for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
1632    if (deoptimization_counts_[i] != 0) {
1633      os << "Number of "
1634         << GetDeoptimizationKindName(static_cast<DeoptimizationKind>(i))
1635         << " deoptimizations: "
1636         << deoptimization_counts_[i]
1637         << "\n";
1638    }
1639  }
1640}
1641
// Dumps runtime state in response to SIGQUIT: class linker, intern table,
// JavaVM, heap, oat files, JIT (if any), deoptimization counters, allocators,
// per-thread state and mutexes, then notifies registered callbacks.
void Runtime::DumpForSigQuit(std::ostream& os) {
  GetClassLinker()->DumpForSigQuit(os);
  GetInternTable()->DumpForSigQuit(os);
  GetJavaVM()->DumpForSigQuit(os);
  GetHeap()->DumpForSigQuit(os);
  oat_file_manager_->DumpForSigQuit(os);
  if (GetJit() != nullptr) {
    GetJit()->DumpForSigQuit(os);
  } else {
    os << "Running non JIT\n";
  }
  DumpDeoptimizations(os);
  TrackedAllocators::Dump(os);
  os << "\n";

  thread_list_->DumpForSigQuit(os);
  BaseMutex::DumpAll(os);

  // Inform anyone else who is interested in SigQuit.
  {
    // Callbacks may touch managed objects, so enter a mutator-lock scope.
    ScopedObjectAccess soa(Thread::Current());
    callbacks_->SigQuit();
  }
}
1666
1667void Runtime::DumpLockHolders(std::ostream& os) {
1668  uint64_t mutator_lock_owner = Locks::mutator_lock_->GetExclusiveOwnerTid();
1669  pid_t thread_list_lock_owner = GetThreadList()->GetLockOwner();
1670  pid_t classes_lock_owner = GetClassLinker()->GetClassesLockOwner();
1671  pid_t dex_lock_owner = GetClassLinker()->GetDexLockOwner();
1672  if ((thread_list_lock_owner | classes_lock_owner | dex_lock_owner) != 0) {
1673    os << "Mutator lock exclusive owner tid: " << mutator_lock_owner << "\n"
1674       << "ThreadList lock owner tid: " << thread_list_lock_owner << "\n"
1675       << "ClassLinker classes lock owner tid: " << classes_lock_owner << "\n"
1676       << "ClassLinker dex lock owner tid: " << dex_lock_owner << "\n";
1677  }
1678}
1679
1680void Runtime::SetStatsEnabled(bool new_state) {
1681  Thread* self = Thread::Current();
1682  MutexLock mu(self, *Locks::instrument_entrypoints_lock_);
1683  if (new_state == true) {
1684    GetStats()->Clear(~0);
1685    // TODO: wouldn't it make more sense to clear _all_ threads' stats?
1686    self->GetStats()->Clear(~0);
1687    if (stats_enabled_ != new_state) {
1688      GetInstrumentation()->InstrumentQuickAllocEntryPointsLocked();
1689    }
1690  } else if (stats_enabled_ != new_state) {
1691    GetInstrumentation()->UninstrumentQuickAllocEntryPointsLocked();
1692  }
1693  stats_enabled_ = new_state;
1694}
1695
1696void Runtime::ResetStats(int kinds) {
1697  GetStats()->Clear(kinds & 0xffff);
1698  // TODO: wouldn't it make more sense to clear _all_ threads' stats?
1699  Thread::Current()->GetStats()->Clear(kinds >> 16);
1700}
1701
1702int32_t Runtime::GetStat(int kind) {
1703  RuntimeStats* stats;
1704  if (kind < (1<<16)) {
1705    stats = GetStats();
1706  } else {
1707    stats = Thread::Current()->GetStats();
1708    kind >>= 16;
1709  }
1710  switch (kind) {
1711  case KIND_ALLOCATED_OBJECTS:
1712    return stats->allocated_objects;
1713  case KIND_ALLOCATED_BYTES:
1714    return stats->allocated_bytes;
1715  case KIND_FREED_OBJECTS:
1716    return stats->freed_objects;
1717  case KIND_FREED_BYTES:
1718    return stats->freed_bytes;
1719  case KIND_GC_INVOCATIONS:
1720    return stats->gc_for_alloc_count;
1721  case KIND_CLASS_INIT_COUNT:
1722    return stats->class_init_count;
1723  case KIND_CLASS_INIT_TIME:
1724    // Convert ns to us, reduce to 32 bits.
1725    return static_cast<int>(stats->class_init_time_ns / 1000);
1726  case KIND_EXT_ALLOCATED_OBJECTS:
1727  case KIND_EXT_ALLOCATED_BYTES:
1728  case KIND_EXT_FREED_OBJECTS:
1729  case KIND_EXT_FREED_BYTES:
1730    return 0;  // backward compatibility
1731  default:
1732    LOG(FATAL) << "Unknown statistic " << kind;
1733    return -1;  // unreachable
1734  }
1735}
1736
// Blocks SIGPIPE, SIGQUIT and SIGUSR1 on the calling thread.
void Runtime::BlockSignals() {
  SignalSet signals;
  // SIGPIPE is blocked so broken pipes surface as write errors instead.
  signals.Add(SIGPIPE);
  // SIGQUIT is used to dump the runtime's state (including stack traces).
  signals.Add(SIGQUIT);
  // SIGUSR1 is used to initiate a GC.
  signals.Add(SIGUSR1);
  signals.Block();
}
1746
// Attaches the calling native thread to the runtime. Returns true on success.
// `create_peer` controls whether a java.lang.Thread peer object is created.
bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                                  bool create_peer) {
  ScopedTrace trace(__FUNCTION__);
  return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr;
}
1752
// Detaches the calling thread from the runtime. Aborts if the thread is not
// attached or is still executing managed code.
void Runtime::DetachCurrentThread() {
  ScopedTrace trace(__FUNCTION__);
  Thread* self = Thread::Current();
  if (self == nullptr) {
    LOG(FATAL) << "attempting to detach thread that is not attached";
  }
  if (self->HasManagedStack()) {
    LOG(FATAL) << *Thread::Current() << " attempting to detach while still running code";
  }
  thread_list_->Unregister(self);
}
1764
// Returns the OutOfMemoryError instance pre-allocated at startup (so it can be
// thrown even when allocation is impossible). Logs if it is missing.
mirror::Throwable* Runtime::GetPreAllocatedOutOfMemoryError() {
  mirror::Throwable* oome = pre_allocated_OutOfMemoryError_.Read();
  if (oome == nullptr) {
    LOG(ERROR) << "Failed to return pre-allocated OOME";
  }
  return oome;
}
1772
// Returns the NoClassDefFoundError instance pre-allocated at startup. Logs if
// it is missing.
mirror::Throwable* Runtime::GetPreAllocatedNoClassDefFoundError() {
  mirror::Throwable* ncdfe = pre_allocated_NoClassDefFoundError_.Read();
  if (ncdfe == nullptr) {
    LOG(ERROR) << "Failed to return pre-allocated NoClassDefFoundError";
  }
  return ncdfe;
}
1780
// Visits GC roots that never change after startup: static class references in
// mirror types, primitive array classes, and the runtime's internal methods.
void Runtime::VisitConstantRoots(RootVisitor* visitor) {
  // Visit the classes held as static in mirror classes, these can be visited concurrently and only
  // need to be visited once per GC since they never change.
  mirror::Class::VisitRoots(visitor);
  mirror::Constructor::VisitRoots(visitor);
  mirror::Reference::VisitRoots(visitor);
  mirror::Method::VisitRoots(visitor);
  mirror::StackTraceElement::VisitRoots(visitor);
  mirror::String::VisitRoots(visitor);
  mirror::Throwable::VisitRoots(visitor);
  mirror::Field::VisitRoots(visitor);
  mirror::MethodType::VisitRoots(visitor);
  mirror::MethodHandleImpl::VisitRoots(visitor);
  mirror::MethodHandlesLookup::VisitRoots(visitor);
  mirror::EmulatedStackFrame::VisitRoots(visitor);
  mirror::ClassExt::VisitRoots(visitor);
  mirror::CallSite::VisitRoots(visitor);
  // Visit all the primitive array types classes.
  mirror::PrimitiveArray<uint8_t>::VisitRoots(visitor);   // BooleanArray
  mirror::PrimitiveArray<int8_t>::VisitRoots(visitor);    // ByteArray
  mirror::PrimitiveArray<uint16_t>::VisitRoots(visitor);  // CharArray
  mirror::PrimitiveArray<double>::VisitRoots(visitor);    // DoubleArray
  mirror::PrimitiveArray<float>::VisitRoots(visitor);     // FloatArray
  mirror::PrimitiveArray<int32_t>::VisitRoots(visitor);   // IntArray
  mirror::PrimitiveArray<int64_t>::VisitRoots(visitor);   // LongArray
  mirror::PrimitiveArray<int16_t>::VisitRoots(visitor);   // ShortArray
  // Visiting the roots of these ArtMethods is not currently required since all the GcRoots are
  // null.
  BufferedRootVisitor<16> buffered_visitor(visitor, RootInfo(kRootVMInternal));
  const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
  if (HasResolutionMethod()) {
    resolution_method_->VisitRoots(buffered_visitor, pointer_size);
  }
  if (HasImtConflictMethod()) {
    imt_conflict_method_->VisitRoots(buffered_visitor, pointer_size);
  }
  if (imt_unimplemented_method_ != nullptr) {
    imt_unimplemented_method_->VisitRoots(buffered_visitor, pointer_size);
  }
  for (uint32_t i = 0; i < kCalleeSaveSize; ++i) {
    auto* m = reinterpret_cast<ArtMethod*>(callee_save_methods_[i]);
    if (m != nullptr) {
      m->VisitRoots(buffered_visitor, pointer_size);
    }
  }
}
1827
// Visits roots that may be visited concurrently with the mutator: intern
// table, class linker, allocation records, constant roots, and debugger roots.
void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
  intern_table_->VisitRoots(visitor, flags);
  class_linker_->VisitRoots(visitor, flags);
  heap_->VisitAllocationRecords(visitor);
  if ((flags & kVisitRootFlagNewRoots) == 0) {
    // Guaranteed to have no new roots in the constant roots.
    VisitConstantRoots(visitor);
  }
  Dbg::VisitRoots(visitor);
}
1838
// Visits roots held by the active pre-initialization transaction, if any.
void Runtime::VisitTransactionRoots(RootVisitor* visitor) {
  if (preinitialization_transaction_ != nullptr) {
    preinitialization_transaction_->VisitRoots(visitor);
  }
}
1844
// Visits runtime-held roots that are not owned by any particular thread.
void Runtime::VisitNonThreadRoots(RootVisitor* visitor) {
  java_vm_->VisitRoots(visitor);
  sentinel_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
  pre_allocated_OutOfMemoryError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
  pre_allocated_NoClassDefFoundError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
  verifier::MethodVerifier::VisitStaticRoots(visitor);
  VisitTransactionRoots(visitor);
}
1853
// Visits roots that must not be visited concurrently with the mutator.
void Runtime::VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
  VisitThreadRoots(visitor, flags);
  VisitNonThreadRoots(visitor);
}
1858
// Visits per-thread roots via the thread list.
void Runtime::VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags) {
  thread_list_->VisitRoots(visitor, flags);
}
1862
// Visits all runtime roots: first the non-concurrent set, then the concurrent
// set.
void Runtime::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  VisitNonConcurrentRoots(visitor, flags);
  VisitConcurrentRoots(visitor, flags);
}
1867
1868void Runtime::VisitImageRoots(RootVisitor* visitor) {
1869  for (auto* space : GetHeap()->GetContinuousSpaces()) {
1870    if (space->IsImageSpace()) {
1871      auto* image_space = space->AsImageSpace();
1872      const auto& image_header = image_space->GetImageHeader();
1873      for (int32_t i = 0, size = image_header.GetImageRoots()->GetLength(); i != size; ++i) {
1874        auto* obj = image_header.GetImageRoot(static_cast<ImageHeader::ImageRoot>(i));
1875        if (obj != nullptr) {
1876          auto* after_obj = obj;
1877          visitor->VisitRoot(&after_obj, RootInfo(kRootStickyClass));
1878          CHECK_EQ(after_obj, obj);
1879        }
1880      }
1881    }
1882  }
1883}
1884
// Allocates a single ArtMethod from `linear_alloc` and marks it as a runtime
// method (dex method index kDexNoIndex).
static ArtMethod* CreateRuntimeMethod(ClassLinker* class_linker, LinearAlloc* linear_alloc) {
  const PointerSize image_pointer_size = class_linker->GetImagePointerSize();
  const size_t method_alignment = ArtMethod::Alignment(image_pointer_size);
  const size_t method_size = ArtMethod::Size(image_pointer_size);
  // A length-prefixed array of one element keeps the layout consistent with
  // image-allocated method arrays.
  LengthPrefixedArray<ArtMethod>* method_array = class_linker->AllocArtMethodArray(
      Thread::Current(),
      linear_alloc,
      1);
  ArtMethod* method = &method_array->At(0, method_size, method_alignment);
  CHECK(method != nullptr);
  // kDexNoIndex is what makes IsRuntimeMethod() return true.
  method->SetDexMethodIndex(DexFile::kDexNoIndex);
  CHECK(method->IsRuntimeMethod());
  return method;
}
1899
// Creates the runtime method used to resolve IMT conflicts, with an initially
// empty conflict table.
ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) {
  ClassLinker* const class_linker = GetClassLinker();
  ArtMethod* method = CreateRuntimeMethod(class_linker, linear_alloc);
  // When compiling, the code pointer will get set later when the image is loaded.
  const PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
  if (IsAotCompiler()) {
    method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
  } else {
    method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
  }
  // Create empty conflict table.
  method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc),
                              pointer_size);
  return method;
}
1915
// Installs the IMT conflict method; must be a non-null runtime method.
void Runtime::SetImtConflictMethod(ArtMethod* method) {
  CHECK(method != nullptr);
  CHECK(method->IsRuntimeMethod());
  imt_conflict_method_ = method;
}
1921
// Creates the runtime method used as the entrypoint trampoline for method
// resolution.
ArtMethod* Runtime::CreateResolutionMethod() {
  auto* method = CreateRuntimeMethod(GetClassLinker(), GetLinearAlloc());
  // When compiling, the code pointer will get set later when the image is loaded.
  if (IsAotCompiler()) {
    PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
    method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
  } else {
    method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
  }
  return method;
}
1933
// Creates a runtime method used as a callee-save frame marker. It never has
// compiled code (entrypoint left null).
ArtMethod* Runtime::CreateCalleeSaveMethod() {
  auto* method = CreateRuntimeMethod(GetClassLinker(), GetLinearAlloc());
  PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
  method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
  DCHECK_NE(instruction_set_, kNone);
  DCHECK(method->IsRuntimeMethod());
  return method;
}
1942
// Forbids creation/access of new system weaks during GC (non-read-barrier
// configurations only).
void Runtime::DisallowNewSystemWeaks() {
  CHECK(!kUseReadBarrier);
  monitor_list_->DisallowNewMonitors();
  intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
  java_vm_->DisallowNewWeakGlobals();
  heap_->DisallowNewAllocationRecords();
  if (GetJit() != nullptr) {
    GetJit()->GetCodeCache()->DisallowInlineCacheAccess();
  }

  // All other generic system-weak holders.
  for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
    holder->Disallow();
  }
}
1958
// Re-allows system weak creation/access after GC; pairs with
// DisallowNewSystemWeaks (non-read-barrier configurations only).
void Runtime::AllowNewSystemWeaks() {
  CHECK(!kUseReadBarrier);
  monitor_list_->AllowNewMonitors();
  intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this in the sweeping.
  java_vm_->AllowNewWeakGlobals();
  heap_->AllowNewAllocationRecords();
  if (GetJit() != nullptr) {
    GetJit()->GetCodeCache()->AllowInlineCacheAccess();
  }

  // All other generic system-weak holders.
  for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
    holder->Allow();
  }
}
1974
// Wakes up waiters blocked on system weak access.
void Runtime::BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint) {
  // This is used for the read barrier case that uses the thread-local
  // Thread::GetWeakRefAccessEnabled() flag and the checkpoint while weak ref access is disabled
  // (see ThreadList::RunCheckpoint).
  monitor_list_->BroadcastForNewMonitors();
  intern_table_->BroadcastForNewInterns();
  java_vm_->BroadcastForNewWeakGlobals();
  heap_->BroadcastForNewAllocationRecords();
  if (GetJit() != nullptr) {
    GetJit()->GetCodeCache()->BroadcastForInlineCacheAccess();
  }

  // All other generic system-weak holders.
  for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
    holder->Broadcast(broadcast_for_checkpoint);
  }
}
1992
1993void Runtime::SetInstructionSet(InstructionSet instruction_set) {
1994  instruction_set_ = instruction_set;
1995  if ((instruction_set_ == kThumb2) || (instruction_set_ == kArm)) {
1996    for (int i = 0; i != kCalleeSaveSize; ++i) {
1997      CalleeSaveType type = static_cast<CalleeSaveType>(i);
1998      callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
1999    }
2000  } else if (instruction_set_ == kMips) {
2001    for (int i = 0; i != kCalleeSaveSize; ++i) {
2002      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2003      callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
2004    }
2005  } else if (instruction_set_ == kMips64) {
2006    for (int i = 0; i != kCalleeSaveSize; ++i) {
2007      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2008      callee_save_method_frame_infos_[i] = mips64::Mips64CalleeSaveMethodFrameInfo(type);
2009    }
2010  } else if (instruction_set_ == kX86) {
2011    for (int i = 0; i != kCalleeSaveSize; ++i) {
2012      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2013      callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
2014    }
2015  } else if (instruction_set_ == kX86_64) {
2016    for (int i = 0; i != kCalleeSaveSize; ++i) {
2017      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2018      callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
2019    }
2020  } else if (instruction_set_ == kArm64) {
2021    for (int i = 0; i != kCalleeSaveSize; ++i) {
2022      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2023      callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
2024    }
2025  } else {
2026    UNIMPLEMENTED(FATAL) << instruction_set_;
2027  }
2028}
2029
// Resets the instruction set to kNone.
void Runtime::ClearInstructionSet() {
  instruction_set_ = InstructionSet::kNone;
}
2033
// Stores the callee-save method for the given frame type. The method pointer
// is stored as an integer (see callee_save_methods_).
void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
  DCHECK_LT(static_cast<uint32_t>(type), kCalleeSaveSize);
  CHECK(method != nullptr);
  callee_save_methods_[static_cast<size_t>(type)] = reinterpret_cast<uintptr_t>(method);
}
2039
2040void Runtime::ClearCalleeSaveMethods() {
2041  for (size_t i = 0; i < kCalleeSaveSize; ++i) {
2042    callee_save_methods_[i] = reinterpret_cast<uintptr_t>(nullptr);
2043  }
2044}
2045
2046void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
2047                              const std::string& profile_output_filename) {
2048  if (jit_.get() == nullptr) {
2049    // We are not JITing. Nothing to do.
2050    return;
2051  }
2052
2053  VLOG(profiler) << "Register app with " << profile_output_filename
2054      << " " << android::base::Join(code_paths, ':');
2055
2056  if (profile_output_filename.empty()) {
2057    LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
2058    return;
2059  }
2060  if (!FileExists(profile_output_filename)) {
2061    LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
2062    return;
2063  }
2064  if (code_paths.empty()) {
2065    LOG(WARNING) << "JIT profile information will not be recorded: code paths is empty.";
2066    return;
2067  }
2068
2069  jit_->StartProfileSaver(profile_output_filename, code_paths);
2070}
2071
2072// Transaction support.
// Enters transaction mode (AOT compiler only); `transaction` records writes
// performed during class pre-initialization.
void Runtime::EnterTransactionMode(Transaction* transaction) {
  DCHECK(IsAotCompiler());
  DCHECK(transaction != nullptr);
  DCHECK(!IsActiveTransaction());
  preinitialization_transaction_ = transaction;
}
2079
// Leaves transaction mode (AOT compiler only). Does not delete the
// transaction; ownership stays with the caller of EnterTransactionMode.
void Runtime::ExitTransactionMode() {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_ = nullptr;
}
2085
2086bool Runtime::IsTransactionAborted() const {
2087  if (!IsActiveTransaction()) {
2088    return false;
2089  } else {
2090    DCHECK(IsAotCompiler());
2091    return preinitialization_transaction_->IsAborted();
2092  }
2093}
2094
// Aborts the active transaction and throws the abort error on `self`.
void Runtime::AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message) {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  // Throwing an exception may cause its class initialization. If we mark the transaction
  // aborted before that, we may warn with a false alarm. Throwing the exception before
  // marking the transaction aborted avoids that.
  preinitialization_transaction_->ThrowAbortError(self, &abort_message);
  preinitialization_transaction_->Abort(abort_message);
}
2104
// Re-throws the abort error of the active transaction on `self`.
void Runtime::ThrowTransactionAbortError(Thread* self) {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  // Passing nullptr means we rethrow an exception with the earlier transaction abort message.
  preinitialization_transaction_->ThrowAbortError(self, nullptr);
}
2111
// Forwards a boolean field write to the active transaction's log.
void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
                                      uint8_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile);
}
2118
// Forwards a byte field write to the active transaction's log.
void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
                                   int8_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteFieldByte(obj, field_offset, value, is_volatile);
}
2125
// Forwards a char field write to the active transaction's log.
void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
                                   uint16_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteFieldChar(obj, field_offset, value, is_volatile);
}
2132
// Forwards a short field write to the active transaction's log.
void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
                                    int16_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteFieldShort(obj, field_offset, value, is_volatile);
}
2139
// Forwards a 32-bit field write to the active transaction's log.
void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset,
                                 uint32_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteField32(obj, field_offset, value, is_volatile);
}
2146
// Forwards a 64-bit field write to the active transaction's log.
void Runtime::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset,
                                 uint64_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteField64(obj, field_offset, value, is_volatile);
}
2153
// Forwards a reference field write to the active transaction's log.
void Runtime::RecordWriteFieldReference(mirror::Object* obj,
                                        MemberOffset field_offset,
                                        ObjPtr<mirror::Object> value,
                                        bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteFieldReference(obj,
                                                            field_offset,
                                                            value.Ptr(),
                                                            is_volatile);
}
2165
// Forwards an array element write to the active transaction's log.
void Runtime::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteArray(array, index, value);
}
2171
// Forwards a strong intern-table insertion to the active transaction's log.
void Runtime::RecordStrongStringInsertion(ObjPtr<mirror::String> s) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordStrongStringInsertion(s);
}
2177
// Forwards a weak intern-table insertion to the active transaction's log.
void Runtime::RecordWeakStringInsertion(ObjPtr<mirror::String> s) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWeakStringInsertion(s);
}
2183
// Forwards a strong intern-table removal to the active transaction's log.
void Runtime::RecordStrongStringRemoval(ObjPtr<mirror::String> s) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordStrongStringRemoval(s);
}
2189
// Forwards a weak intern-table removal to the active transaction's log.
void Runtime::RecordWeakStringRemoval(ObjPtr<mirror::String> s) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWeakStringRemoval(s);
}
2195
// Forwards a dex-cache string resolution to the active transaction's log.
void Runtime::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
                                  dex::StringIndex string_idx) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordResolveString(dex_cache, string_idx);
}
2202
// Stores the fault message under its lock (written from fault handlers, read
// when reporting crashes).
void Runtime::SetFaultMessage(const std::string& message) {
  MutexLock mu(Thread::Current(), fault_message_lock_);
  fault_message_ = message;
}
2207
2208void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
2209    const {
2210  if (GetInstrumentation()->InterpretOnly()) {
2211    argv->push_back("--compiler-filter=quicken");
2212  }
2213
2214  // Make the dex2oat instruction set match that of the launching runtime. If we have multiple
2215  // architecture support, dex2oat may be compiled as a different instruction-set than that
2216  // currently being executed.
2217  std::string instruction_set("--instruction-set=");
2218  instruction_set += GetInstructionSetString(kRuntimeISA);
2219  argv->push_back(instruction_set);
2220
2221  std::unique_ptr<const InstructionSetFeatures> features(InstructionSetFeatures::FromCppDefines());
2222  std::string feature_string("--instruction-set-features=");
2223  feature_string += features->GetFeatureString();
2224  argv->push_back(feature_string);
2225}
2226
// Creates the JIT from jit_options_. Failure is non-fatal: the runtime keeps
// running without a JIT and only logs a warning.
void Runtime::CreateJit() {
  CHECK(!IsAotCompiler());
  if (kIsDebugBuild && GetInstrumentation()->IsForcedInterpretOnly()) {
    DCHECK(!jit_options_->UseJitCompilation());
  }
  std::string error_msg;
  jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
  if (jit_.get() == nullptr) {
    LOG(WARNING) << "Failed to create JIT " << error_msg;
    return;
  }

  // In case we have a profile path passed as a command line argument,
  // register the current class path for profiling now. Note that we cannot do
  // this before we create the JIT and having it here is the most convenient way.
  // This is used when testing profiles with dalvikvm command as there is no
  // framework to register the dex files for profiling.
  if (jit_options_->GetSaveProfilingInfo() &&
      !jit_options_->GetProfileSaverOptions().GetProfilePath().empty()) {
    std::vector<std::string> dex_filenames;
    Split(class_path_string_, ':', &dex_filenames);
    RegisterAppInfo(dex_filenames, jit_options_->GetProfileSaverOptions().GetProfilePath());
  }
}
2251
2252bool Runtime::CanRelocate() const {
2253  return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
2254}
2255
// True when this runtime instance is a compiler producing the boot image.
bool Runtime::IsCompilingBootImage() const {
  return IsCompiler() && compiler_callbacks_->IsBootImage();
}
2259
// Installs the resolution trampoline method; must be a non-null runtime
// method.
void Runtime::SetResolutionMethod(ArtMethod* method) {
  CHECK(method != nullptr);
  CHECK(method->IsRuntimeMethod()) << method;
  resolution_method_ = method;
}
2265
2266void Runtime::SetImtUnimplementedMethod(ArtMethod* method) {
2267  CHECK(method != nullptr);
2268  CHECK(method->IsRuntimeMethod());
2269  imt_unimplemented_method_ = method;
2270}
2271
// Ensures both IMT runtime methods have (at least empty) conflict tables.
void Runtime::FixupConflictTables() {
  // We can only do this after the class linker is created.
  const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
  if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
    imt_unimplemented_method_->SetImtConflictTable(
        ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
        pointer_size);
  }
  if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) {
    imt_conflict_method_->SetImtConflictTable(
          ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
          pointer_size);
  }
}
2286
2287bool Runtime::IsVerificationEnabled() const {
2288  return verify_ == verifier::VerifyMode::kEnable ||
2289      verify_ == verifier::VerifyMode::kSoftFail;
2290}
2291
// True when verification failures are demoted to soft failures.
bool Runtime::IsVerificationSoftFail() const {
  return verify_ == verifier::VerifyMode::kSoftFail;
}
2295
// Returns whether code at `code` may be asynchronously deoptimized (e.g. by
// the debugger rather than by the code itself).
bool Runtime::IsAsyncDeoptimizeable(uintptr_t code) const {
  // We only support async deopt (ie the compiled code is not explicitly asking for
  // deopt, but something else like the debugger) in debuggable JIT code.
  // We could look at the oat file where `code` is being defined,
  // and check whether it's been compiled debuggable, but we decided to
  // only rely on the JIT for debuggable apps.
  return IsJavaDebuggable() &&
      GetJit() != nullptr &&
      GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(code));
}
2306
2307LinearAlloc* Runtime::CreateLinearAlloc() {
2308  // For 64 bit compilers, it needs to be in low 4GB in the case where we are cross compiling for a
2309  // 32 bit target. In this case, we have 32 bit pointers in the dex cache arrays which can't hold
2310  // when we have 64 bit ArtMethod pointers.
2311  return (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA))
2312      ? new LinearAlloc(low_4gb_arena_pool_.get())
2313      : new LinearAlloc(arena_pool_.get());
2314}
2315
// Minimum hash-table load factor; tighter in low-memory mode.
double Runtime::GetHashTableMinLoadFactor() const {
  return is_low_memory_mode_ ? kLowMemoryMinLoadFactor : kNormalMinLoadFactor;
}
2319
// Maximum hash-table load factor; tighter in low-memory mode.
double Runtime::GetHashTableMaxLoadFactor() const {
  return is_low_memory_mode_ ? kLowMemoryMaxLoadFactor : kNormalMaxLoadFactor;
}
2323
2324void Runtime::UpdateProcessState(ProcessState process_state) {
2325  ProcessState old_process_state = process_state_;
2326  process_state_ = process_state;
2327  GetHeap()->UpdateProcessState(old_process_state, process_state);
2328}
2329
// Marks the calling thread as JIT-sensitive by delegating to
// Thread::SetJitSensitiveThread(). Const: no Runtime state is touched.
void Runtime::RegisterSensitiveThread() const {
  Thread::SetJitSensitiveThread();
}
2333
2334// Returns true if JIT compilations are enabled. GetJit() will be not null in this case.
2335bool Runtime::UseJitCompilation() const {
2336  return (jit_ != nullptr) && jit_->UseJitCompilation();
2337}
2338
2339void Runtime::EnvSnapshot::TakeSnapshot() {
2340  char** env = GetEnviron();
2341  for (size_t i = 0; env[i] != nullptr; ++i) {
2342    name_value_pairs_.emplace_back(new std::string(env[i]));
2343  }
2344  // The strings in name_value_pairs_ retain ownership of the c_str, but we assign pointers
2345  // for quick use by GetSnapshot.  This avoids allocation and copying cost at Exec.
2346  c_env_vector_.reset(new char*[name_value_pairs_.size() + 1]);
2347  for (size_t i = 0; env[i] != nullptr; ++i) {
2348    c_env_vector_[i] = const_cast<char*>(name_value_pairs_[i]->c_str());
2349  }
2350  c_env_vector_[name_value_pairs_.size()] = nullptr;
2351}
2352
2353char** Runtime::EnvSnapshot::GetSnapshot() const {
2354  return c_env_vector_.get();
2355}
2356
2357void Runtime::AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder) {
2358  gc::ScopedGCCriticalSection gcs(Thread::Current(),
2359                                  gc::kGcCauseAddRemoveSystemWeakHolder,
2360                                  gc::kCollectorTypeAddRemoveSystemWeakHolder);
2361  // Note: The ScopedGCCriticalSection also ensures that the rest of the function is in
2362  //       a critical section.
2363  system_weak_holders_.push_back(holder);
2364}
2365
2366void Runtime::RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder) {
2367  gc::ScopedGCCriticalSection gcs(Thread::Current(),
2368                                  gc::kGcCauseAddRemoveSystemWeakHolder,
2369                                  gc::kCollectorTypeAddRemoveSystemWeakHolder);
2370  auto it = std::find(system_weak_holders_.begin(), system_weak_holders_.end(), holder);
2371  if (it != system_weak_holders_.end()) {
2372    system_weak_holders_.erase(it);
2373  }
2374}
2375
2376RuntimeCallbacks* Runtime::GetRuntimeCallbacks() {
2377  return callbacks_.get();
2378}
2379
2380// Used to patch boot image method entry point to interpreter bridge.
2381class UpdateEntryPointsClassVisitor : public ClassVisitor {
2382 public:
2383  explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
2384      : instrumentation_(instrumentation) {}
2385
2386  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
2387    auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
2388    for (auto& m : klass->GetMethods(pointer_size)) {
2389      const void* code = m.GetEntryPointFromQuickCompiledCode();
2390      if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
2391          !m.IsNative() &&
2392          !m.IsProxyMethod()) {
2393        instrumentation_->UpdateMethodsCodeForJavaDebuggable(&m, GetQuickToInterpreterBridge());
2394      }
2395    }
2396    return true;
2397  }
2398
2399 private:
2400  instrumentation::Instrumentation* const instrumentation_;
2401};
2402
// Records whether this runtime should be treated as Java-debuggable. Boot image
// entry points are patched separately via DeoptimizeBootImage(), once startup
// is far enough along for that to be safe.
void Runtime::SetJavaDebuggable(bool value) {
  is_java_debuggable_ = value;
  // Do not call DeoptimizeBootImage just yet, the runtime may still be starting up.
}
2407
2408void Runtime::DeoptimizeBootImage() {
2409  // If we've already started and we are setting this runtime to debuggable,
2410  // we patch entry points of methods in boot image to interpreter bridge, as
2411  // boot image code may be AOT compiled as not debuggable.
2412  if (!GetInstrumentation()->IsForcedInterpretOnly()) {
2413    ScopedObjectAccess soa(Thread::Current());
2414    UpdateEntryPointsClassVisitor visitor(GetInstrumentation());
2415    GetClassLinker()->VisitClasses(&visitor);
2416  }
2417}
2418
2419}  // namespace art
2420