1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "runtime.h"
18
19// sys/mount.h has to come before linux/fs.h due to redefinition of MS_RDONLY, MS_BIND, etc
20#include <sys/mount.h>
21#ifdef __linux__
22#include <linux/fs.h>
23#include <sys/prctl.h>
24#endif
25
26#include <fcntl.h>
27#include <signal.h>
28#include <sys/syscall.h>
29
30#if defined(__APPLE__)
31#include <crt_externs.h>  // for _NSGetEnviron
32#endif
33
34#include <cstdio>
35#include <cstdlib>
36#include <limits>
37#include <vector>
38
39#include "android-base/strings.h"
40
41#include "aot_class_linker.h"
42#include "arch/arm/quick_method_frame_info_arm.h"
43#include "arch/arm/registers_arm.h"
44#include "arch/arm64/quick_method_frame_info_arm64.h"
45#include "arch/arm64/registers_arm64.h"
46#include "arch/instruction_set_features.h"
47#include "arch/mips/quick_method_frame_info_mips.h"
48#include "arch/mips/registers_mips.h"
49#include "arch/mips64/quick_method_frame_info_mips64.h"
50#include "arch/mips64/registers_mips64.h"
51#include "arch/x86/quick_method_frame_info_x86.h"
52#include "arch/x86/registers_x86.h"
53#include "arch/x86_64/quick_method_frame_info_x86_64.h"
54#include "arch/x86_64/registers_x86_64.h"
55#include "art_field-inl.h"
56#include "art_method-inl.h"
57#include "asm_support.h"
58#include "asm_support_check.h"
59#include "base/aborting.h"
60#include "base/arena_allocator.h"
61#include "base/atomic.h"
62#include "base/dumpable.h"
63#include "base/enums.h"
64#include "base/file_utils.h"
65#include "base/memory_tool.h"
66#include "base/mutex.h"
67#include "base/os.h"
68#include "base/quasi_atomic.h"
69#include "base/stl_util.h"
70#include "base/systrace.h"
71#include "base/unix_file/fd_file.h"
72#include "base/utils.h"
73#include "class_linker-inl.h"
74#include "compiler_callbacks.h"
75#include "debugger.h"
76#include "dex/art_dex_file_loader.h"
77#include "dex/dex_file_loader.h"
78#include "elf_file.h"
79#include "entrypoints/runtime_asm_entrypoints.h"
80#include "experimental_flags.h"
81#include "fault_handler.h"
82#include "gc/accounting/card_table-inl.h"
83#include "gc/heap.h"
84#include "gc/scoped_gc_critical_section.h"
85#include "gc/space/image_space.h"
86#include "gc/space/space-inl.h"
87#include "gc/system_weak.h"
88#include "handle_scope-inl.h"
89#include "hidden_api.h"
90#include "image-inl.h"
91#include "instrumentation.h"
92#include "intern_table.h"
93#include "interpreter/interpreter.h"
94#include "java_vm_ext.h"
95#include "jit/jit.h"
96#include "jit/jit_code_cache.h"
97#include "jit/profile_saver.h"
98#include "jni_internal.h"
99#include "linear_alloc.h"
100#include "memory_representation.h"
101#include "mirror/array.h"
102#include "mirror/class-inl.h"
103#include "mirror/class_ext.h"
104#include "mirror/class_loader.h"
105#include "mirror/emulated_stack_frame.h"
106#include "mirror/field.h"
107#include "mirror/method.h"
108#include "mirror/method_handle_impl.h"
109#include "mirror/method_handles_lookup.h"
110#include "mirror/method_type.h"
111#include "mirror/stack_trace_element.h"
112#include "mirror/throwable.h"
113#include "mirror/var_handle.h"
114#include "monitor.h"
115#include "native/dalvik_system_DexFile.h"
116#include "native/dalvik_system_VMDebug.h"
117#include "native/dalvik_system_VMRuntime.h"
118#include "native/dalvik_system_VMStack.h"
119#include "native/dalvik_system_ZygoteHooks.h"
120#include "native/java_lang_Class.h"
121#include "native/java_lang_Object.h"
122#include "native/java_lang_String.h"
123#include "native/java_lang_StringFactory.h"
124#include "native/java_lang_System.h"
125#include "native/java_lang_Thread.h"
126#include "native/java_lang_Throwable.h"
127#include "native/java_lang_VMClassLoader.h"
128#include "native/java_lang_invoke_MethodHandleImpl.h"
129#include "native/java_lang_ref_FinalizerReference.h"
130#include "native/java_lang_ref_Reference.h"
131#include "native/java_lang_reflect_Array.h"
132#include "native/java_lang_reflect_Constructor.h"
133#include "native/java_lang_reflect_Executable.h"
134#include "native/java_lang_reflect_Field.h"
135#include "native/java_lang_reflect_Method.h"
136#include "native/java_lang_reflect_Parameter.h"
137#include "native/java_lang_reflect_Proxy.h"
138#include "native/java_util_concurrent_atomic_AtomicLong.h"
139#include "native/libcore_util_CharsetUtils.h"
140#include "native/org_apache_harmony_dalvik_ddmc_DdmServer.h"
141#include "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
142#include "native/sun_misc_Unsafe.h"
143#include "native_bridge_art_interface.h"
144#include "native_stack_dump.h"
145#include "nativehelper/scoped_local_ref.h"
146#include "oat_file.h"
147#include "oat_file_manager.h"
148#include "object_callbacks.h"
149#include "parsed_options.h"
150#include "quick/quick_method_frame_info.h"
151#include "reflection.h"
152#include "runtime_callbacks.h"
153#include "runtime_intrinsics.h"
154#include "runtime_options.h"
155#include "scoped_thread_state_change-inl.h"
156#include "sigchain.h"
157#include "signal_catcher.h"
158#include "signal_set.h"
159#include "thread.h"
160#include "thread_list.h"
161#include "ti/agent.h"
162#include "trace.h"
163#include "transaction.h"
164#include "vdex_file.h"
165#include "verifier/method_verifier.h"
166#include "well_known_classes.h"
167
168#ifdef ART_TARGET_ANDROID
169#include <android/set_abort_message.h>
170#endif
171
172namespace art {
173
174// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
175static constexpr bool kEnableJavaStackTraceHandler = false;
176// Tuned by compiling GmsCore under perf and measuring time spent in DescriptorEquals for class
177// linking.
178static constexpr double kLowMemoryMinLoadFactor = 0.5;
179static constexpr double kLowMemoryMaxLoadFactor = 0.8;
180static constexpr double kNormalMinLoadFactor = 0.4;
181static constexpr double kNormalMaxLoadFactor = 0.7;
182
183// Extra added to the default heap growth multiplier. Used to adjust the GC ergonomics for the read
184// barrier config.
185static constexpr double kExtraDefaultHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
186
187Runtime* Runtime::instance_ = nullptr;
188
189struct TraceConfig {
190  Trace::TraceMode trace_mode;
191  Trace::TraceOutputMode trace_output_mode;
192  std::string trace_file;
193  size_t trace_file_size;
194};
195
196namespace {
197#ifdef __APPLE__
198inline char** GetEnviron() {
199  // When Google Test is built as a framework on MacOS X, the environ variable
200  // is unavailable. Apple's documentation (man environ) recommends using
201  // _NSGetEnviron() instead.
202  return *_NSGetEnviron();
203}
204#else
205// Some POSIX platforms expect you to declare environ. extern "C" makes
206// it reside in the global namespace.
207extern "C" char** environ;
208inline char** GetEnviron() { return environ; }
209#endif
210}  // namespace
211
212Runtime::Runtime()
213    : resolution_method_(nullptr),
214      imt_conflict_method_(nullptr),
215      imt_unimplemented_method_(nullptr),
216      instruction_set_(InstructionSet::kNone),
217      compiler_callbacks_(nullptr),
218      is_zygote_(false),
219      must_relocate_(false),
220      is_concurrent_gc_enabled_(true),
221      is_explicit_gc_disabled_(false),
222      dex2oat_enabled_(true),
223      image_dex2oat_enabled_(true),
224      default_stack_size_(0),
225      heap_(nullptr),
226      max_spins_before_thin_lock_inflation_(Monitor::kDefaultMaxSpinsBeforeThinLockInflation),
227      monitor_list_(nullptr),
228      monitor_pool_(nullptr),
229      thread_list_(nullptr),
230      intern_table_(nullptr),
231      class_linker_(nullptr),
232      signal_catcher_(nullptr),
233      use_tombstoned_traces_(false),
234      java_vm_(nullptr),
235      fault_message_lock_("Fault message lock"),
236      fault_message_(""),
237      threads_being_born_(0),
238      shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
239      shutting_down_(false),
240      shutting_down_started_(false),
241      started_(false),
242      finished_starting_(false),
243      vfprintf_(nullptr),
244      exit_(nullptr),
245      abort_(nullptr),
246      stats_enabled_(false),
247      is_running_on_memory_tool_(RUNNING_ON_MEMORY_TOOL),
248      instrumentation_(),
249      main_thread_group_(nullptr),
250      system_thread_group_(nullptr),
251      system_class_loader_(nullptr),
252      dump_gc_performance_on_shutdown_(false),
253      preinitialization_transactions_(),
254      verify_(verifier::VerifyMode::kNone),
255      allow_dex_file_fallback_(true),
256      target_sdk_version_(kUnsetSdkVersion),
257      implicit_null_checks_(false),
258      implicit_so_checks_(false),
259      implicit_suspend_checks_(false),
260      no_sig_chain_(false),
261      force_native_bridge_(false),
262      is_native_bridge_loaded_(false),
263      is_native_debuggable_(false),
264      async_exceptions_thrown_(false),
265      is_java_debuggable_(false),
266      zygote_max_failed_boots_(0),
267      experimental_flags_(ExperimentalFlags::kNone),
268      oat_file_manager_(nullptr),
269      is_low_memory_mode_(false),
270      safe_mode_(false),
271      hidden_api_policy_(hiddenapi::EnforcementPolicy::kNoChecks),
272      pending_hidden_api_warning_(false),
273      dedupe_hidden_api_warnings_(true),
274      always_set_hidden_api_warning_flag_(false),
275      hidden_api_access_event_log_rate_(0),
276      dump_native_stack_on_sig_quit_(true),
277      pruned_dalvik_cache_(false),
278      // Initially assume we perceive jank in case the process state is never updated.
279      process_state_(kProcessStateJankPerceptible),
280      zygote_no_threads_(false) {
281  static_assert(Runtime::kCalleeSaveSize ==
282                    static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
283
284  CheckAsmSupportOffsetsAndSizes();
285  std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
286  interpreter::CheckInterpreterAsmConstants();
287  callbacks_.reset(new RuntimeCallbacks());
288  for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
289    deoptimization_counts_[i] = 0u;
290  }
291}
292
293Runtime::~Runtime() {
294  ScopedTrace trace("Runtime shutdown");
295  if (is_native_bridge_loaded_) {
296    UnloadNativeBridge();
297  }
298
299  Thread* self = Thread::Current();
300  const bool attach_shutdown_thread = self == nullptr;
301  if (attach_shutdown_thread) {
302    // We can only create a peer if the runtime is actually started. This is only not true during
303    // some tests. If there is extreme memory pressure the allocation of the thread peer can fail.
304    // In this case we will just try again without allocating a peer so that shutdown can continue.
305    // Very few things are actually capable of distinguishing between the peer & peerless states so
306    // this should be fine.
307    bool thread_attached = AttachCurrentThread("Shutdown thread",
308                                               /* as_daemon */ false,
309                                               GetSystemThreadGroup(),
310                                               /* Create peer */ IsStarted());
311    if (UNLIKELY(!thread_attached)) {
312      LOG(WARNING) << "Failed to attach shutdown thread. Trying again without a peer.";
313      CHECK(AttachCurrentThread("Shutdown thread (no java peer)",
314                                /* as_daemon */   false,
315                                /* thread_group*/ nullptr,
316                                /* Create peer */ false));
317    }
318    self = Thread::Current();
319  } else {
320    LOG(WARNING) << "Current thread not detached in Runtime shutdown";
321  }
322
323  if (dump_gc_performance_on_shutdown_) {
324    ScopedLogSeverity sls(LogSeverity::INFO);
325    // This can't be called from the Heap destructor below because it
326    // could call RosAlloc::InspectAll() which needs the thread_list
327    // to be still alive.
328    heap_->DumpGcPerformanceInfo(LOG_STREAM(INFO));
329  }
330
331  if (jit_ != nullptr) {
332    // Stop the profile saver thread before marking the runtime as shutting down.
333    // The saver will try to dump the profiles before being sopped and that
334    // requires holding the mutator lock.
335    jit_->StopProfileSaver();
336  }
337
338  {
339    ScopedTrace trace2("Wait for shutdown cond");
340    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
341    shutting_down_started_ = true;
342    while (threads_being_born_ > 0) {
343      shutdown_cond_->Wait(self);
344    }
345    shutting_down_ = true;
346  }
347  // Shutdown and wait for the daemons.
348  CHECK(self != nullptr);
349  if (IsFinishedStarting()) {
350    ScopedTrace trace2("Waiting for Daemons");
351    self->ClearException();
352    self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
353                                            WellKnownClasses::java_lang_Daemons_stop);
354  }
355
356  Trace::Shutdown();
357
358  // Report death. Clients me require a working thread, still, so do it before GC completes and
359  // all non-daemon threads are done.
360  {
361    ScopedObjectAccess soa(self);
362    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kDeath);
363  }
364
365  if (attach_shutdown_thread) {
366    DetachCurrentThread();
367    self = nullptr;
368  }
369
370  // Make sure to let the GC complete if it is running.
371  heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
372  heap_->DeleteThreadPool();
373  if (jit_ != nullptr) {
374    ScopedTrace trace2("Delete jit");
375    VLOG(jit) << "Deleting jit thread pool";
376    // Delete thread pool before the thread list since we don't want to wait forever on the
377    // JIT compiler threads.
378    jit_->DeleteThreadPool();
379  }
380
381  // Make sure our internal threads are dead before we start tearing down things they're using.
382  GetRuntimeCallbacks()->StopDebugger();
383  delete signal_catcher_;
384
385  // Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
386  {
387    ScopedTrace trace2("Delete thread list");
388    thread_list_->ShutDown();
389  }
390
391  // TODO Maybe do some locking.
392  for (auto& agent : agents_) {
393    agent->Unload();
394  }
395
396  // TODO Maybe do some locking
397  for (auto& plugin : plugins_) {
398    plugin.Unload();
399  }
400
401  // Finally delete the thread list.
402  delete thread_list_;
403
404  // Delete the JIT after thread list to ensure that there is no remaining threads which could be
405  // accessing the instrumentation when we delete it.
406  if (jit_ != nullptr) {
407    VLOG(jit) << "Deleting jit";
408    jit_.reset(nullptr);
409  }
410
411  // Shutdown the fault manager if it was initialized.
412  fault_manager.Shutdown();
413
414  ScopedTrace trace2("Delete state");
415  delete monitor_list_;
416  delete monitor_pool_;
417  delete class_linker_;
418  delete heap_;
419  delete intern_table_;
420  delete oat_file_manager_;
421  Thread::Shutdown();
422  QuasiAtomic::Shutdown();
423  verifier::MethodVerifier::Shutdown();
424
425  // Destroy allocators before shutting down the MemMap because they may use it.
426  java_vm_.reset();
427  linear_alloc_.reset();
428  low_4gb_arena_pool_.reset();
429  arena_pool_.reset();
430  jit_arena_pool_.reset();
431  protected_fault_page_.reset();
432  MemMap::Shutdown();
433
434  // TODO: acquire a static mutex on Runtime to avoid racing.
435  CHECK(instance_ == nullptr || instance_ == this);
436  instance_ = nullptr;
437
438  // Well-known classes must be deleted or it is impossible to successfully start another Runtime
439  // instance. We rely on a small initialization order issue in Runtime::Start() that requires
440  // elements of WellKnownClasses to be null, see b/65500943.
441  WellKnownClasses::Clear();
442}
443
444struct AbortState {
445  void Dump(std::ostream& os) const {
446    if (gAborting > 1) {
447      os << "Runtime aborting --- recursively, so no thread-specific detail!\n";
448      DumpRecursiveAbort(os);
449      return;
450    }
451    gAborting++;
452    os << "Runtime aborting...\n";
453    if (Runtime::Current() == nullptr) {
454      os << "(Runtime does not yet exist!)\n";
455      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
456      return;
457    }
458    Thread* self = Thread::Current();
459
460    // Dump all threads first and then the aborting thread. While this is counter the logical flow,
461    // it improves the chance of relevant data surviving in the Android logs.
462
463    DumpAllThreads(os, self);
464
465    if (self == nullptr) {
466      os << "(Aborting thread was not attached to runtime!)\n";
467      DumpKernelStack(os, GetTid(), "  kernel: ", false);
468      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
469    } else {
470      os << "Aborting thread:\n";
471      if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
472        DumpThread(os, self);
473      } else {
474        if (Locks::mutator_lock_->SharedTryLock(self)) {
475          DumpThread(os, self);
476          Locks::mutator_lock_->SharedUnlock(self);
477        }
478      }
479    }
480  }
481
482  // No thread-safety analysis as we do explicitly test for holding the mutator lock.
483  void DumpThread(std::ostream& os, Thread* self) const NO_THREAD_SAFETY_ANALYSIS {
484    DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self));
485    self->Dump(os);
486    if (self->IsExceptionPending()) {
487      mirror::Throwable* exception = self->GetException();
488      os << "Pending exception " << exception->Dump();
489    }
490  }
491
492  void DumpAllThreads(std::ostream& os, Thread* self) const {
493    Runtime* runtime = Runtime::Current();
494    if (runtime != nullptr) {
495      ThreadList* thread_list = runtime->GetThreadList();
496      if (thread_list != nullptr) {
497        bool tll_already_held = Locks::thread_list_lock_->IsExclusiveHeld(self);
498        bool ml_already_held = Locks::mutator_lock_->IsSharedHeld(self);
499        if (!tll_already_held || !ml_already_held) {
500          os << "Dumping all threads without appropriate locks held:"
501              << (!tll_already_held ? " thread list lock" : "")
502              << (!ml_already_held ? " mutator lock" : "")
503              << "\n";
504        }
505        os << "All threads:\n";
506        thread_list->Dump(os);
507      }
508    }
509  }
510
511  // For recursive aborts.
512  void DumpRecursiveAbort(std::ostream& os) const NO_THREAD_SAFETY_ANALYSIS {
513    // The only thing we'll attempt is dumping the native stack of the current thread. We will only
514    // try this if we haven't exceeded an arbitrary amount of recursions, to recover and actually
515    // die.
516    // Note: as we're using a global counter for the recursive abort detection, there is a potential
517    //       race here and it is not OK to just print when the counter is "2" (one from
518    //       Runtime::Abort(), one from previous Dump() call). Use a number that seems large enough.
519    static constexpr size_t kOnlyPrintWhenRecursionLessThan = 100u;
520    if (gAborting < kOnlyPrintWhenRecursionLessThan) {
521      gAborting++;
522      DumpNativeStack(os, GetTid());
523    }
524  }
525};
526
527void Runtime::Abort(const char* msg) {
528  auto old_value = gAborting.fetch_add(1);  // set before taking any locks
529
530#ifdef ART_TARGET_ANDROID
531  if (old_value == 0) {
532    // Only set the first abort message.
533    android_set_abort_message(msg);
534  }
535#else
536  UNUSED(old_value);
537#endif
538
539#ifdef ART_TARGET_ANDROID
540  android_set_abort_message(msg);
541#endif
542
543  // Ensure that we don't have multiple threads trying to abort at once,
544  // which would result in significantly worse diagnostics.
545  MutexLock mu(Thread::Current(), *Locks::abort_lock_);
546
547  // Get any pending output out of the way.
548  fflush(nullptr);
549
550  // Many people have difficulty distinguish aborts from crashes,
551  // so be explicit.
552  // Note: use cerr on the host to print log lines immediately, so we get at least some output
553  //       in case of recursive aborts. We lose annotation with the source file and line number
554  //       here, which is a minor issue. The same is significantly more complicated on device,
555  //       which is why we ignore the issue there.
556  AbortState state;
557  if (kIsTargetBuild) {
558    LOG(FATAL_WITHOUT_ABORT) << Dumpable<AbortState>(state);
559  } else {
560    std::cerr << Dumpable<AbortState>(state);
561  }
562
563  // Sometimes we dump long messages, and the Android abort message only retains the first line.
564  // In those cases, just log the message again, to avoid logcat limits.
565  if (msg != nullptr && strchr(msg, '\n') != nullptr) {
566    LOG(FATAL_WITHOUT_ABORT) << msg;
567  }
568
569  // Call the abort hook if we have one.
570  if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
571    LOG(FATAL_WITHOUT_ABORT) << "Calling abort hook...";
572    Runtime::Current()->abort_();
573    // notreached
574    LOG(FATAL_WITHOUT_ABORT) << "Unexpectedly returned from abort hook!";
575  }
576
577#if defined(__GLIBC__)
578  // TODO: we ought to be able to use pthread_kill(3) here (or abort(3),
579  // which POSIX defines in terms of raise(3), which POSIX defines in terms
580  // of pthread_kill(3)). On Linux, though, libcorkscrew can't unwind through
581  // libpthread, which means the stacks we dump would be useless. Calling
582  // tgkill(2) directly avoids that.
583  syscall(__NR_tgkill, getpid(), GetTid(), SIGABRT);
584  // TODO: LLVM installs it's own SIGABRT handler so exit to be safe... Can we disable that in LLVM?
585  // If not, we could use sigaction(3) before calling tgkill(2) and lose this call to exit(3).
586  exit(1);
587#else
588  abort();
589#endif
590  // notreached
591}
592
593void Runtime::PreZygoteFork() {
594  heap_->PreZygoteFork();
595}
596
597void Runtime::CallExitHook(jint status) {
598  if (exit_ != nullptr) {
599    ScopedThreadStateChange tsc(Thread::Current(), kNative);
600    exit_(status);
601    LOG(WARNING) << "Exit hook returned instead of exiting!";
602  }
603}
604
605void Runtime::SweepSystemWeaks(IsMarkedVisitor* visitor) {
606  GetInternTable()->SweepInternTableWeaks(visitor);
607  GetMonitorList()->SweepMonitorList(visitor);
608  GetJavaVM()->SweepJniWeakGlobals(visitor);
609  GetHeap()->SweepAllocationRecords(visitor);
610  if (GetJit() != nullptr) {
611    // Visit JIT literal tables. Objects in these tables are classes and strings
612    // and only classes can be affected by class unloading. The strings always
613    // stay alive as they are strongly interned.
614    // TODO: Move this closer to CleanupClassLoaders, to avoid blocking weak accesses
615    // from mutators. See b/32167580.
616    GetJit()->GetCodeCache()->SweepRootTables(visitor);
617  }
618
619  // All other generic system-weak holders.
620  for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
621    holder->Sweep(visitor);
622  }
623}
624
625bool Runtime::ParseOptions(const RuntimeOptions& raw_options,
626                           bool ignore_unrecognized,
627                           RuntimeArgumentMap* runtime_options) {
628  Locks::Init();
629  InitLogging(/* argv */ nullptr, Abort);  // Calls Locks::Init() as a side effect.
630  bool parsed = ParsedOptions::Parse(raw_options, ignore_unrecognized, runtime_options);
631  if (!parsed) {
632    LOG(ERROR) << "Failed to parse options";
633    return false;
634  }
635  return true;
636}
637
638// Callback to check whether it is safe to call Abort (e.g., to use a call to
639// LOG(FATAL)).  It is only safe to call Abort if the runtime has been created,
640// properly initialized, and has not shut down.
641static bool IsSafeToCallAbort() NO_THREAD_SAFETY_ANALYSIS {
642  Runtime* runtime = Runtime::Current();
643  return runtime != nullptr && runtime->IsStarted() && !runtime->IsShuttingDownLocked();
644}
645
646bool Runtime::Create(RuntimeArgumentMap&& runtime_options) {
647  // TODO: acquire a static mutex on Runtime to avoid racing.
648  if (Runtime::instance_ != nullptr) {
649    return false;
650  }
651  instance_ = new Runtime;
652  Locks::SetClientCallback(IsSafeToCallAbort);
653  if (!instance_->Init(std::move(runtime_options))) {
654    // TODO: Currently deleting the instance will abort the runtime on destruction. Now This will
655    // leak memory, instead. Fix the destructor. b/19100793.
656    // delete instance_;
657    instance_ = nullptr;
658    return false;
659  }
660  return true;
661}
662
663bool Runtime::Create(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
664  RuntimeArgumentMap runtime_options;
665  return ParseOptions(raw_options, ignore_unrecognized, &runtime_options) &&
666      Create(std::move(runtime_options));
667}
668
669static jobject CreateSystemClassLoader(Runtime* runtime) {
670  if (runtime->IsAotCompiler() && !runtime->GetCompilerCallbacks()->IsBootImage()) {
671    return nullptr;
672  }
673
674  ScopedObjectAccess soa(Thread::Current());
675  ClassLinker* cl = Runtime::Current()->GetClassLinker();
676  auto pointer_size = cl->GetImagePointerSize();
677
678  StackHandleScope<2> hs(soa.Self());
679  Handle<mirror::Class> class_loader_class(
680      hs.NewHandle(soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ClassLoader)));
681  CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true));
682
683  ArtMethod* getSystemClassLoader = class_loader_class->FindClassMethod(
684      "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size);
685  CHECK(getSystemClassLoader != nullptr);
686  CHECK(getSystemClassLoader->IsStatic());
687
688  JValue result = InvokeWithJValues(soa,
689                                    nullptr,
690                                    jni::EncodeArtMethod(getSystemClassLoader),
691                                    nullptr);
692  JNIEnv* env = soa.Self()->GetJniEnv();
693  ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL()));
694  CHECK(system_class_loader.get() != nullptr);
695
696  soa.Self()->SetClassLoaderOverride(system_class_loader.get());
697
698  Handle<mirror::Class> thread_class(
699      hs.NewHandle(soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread)));
700  CHECK(cl->EnsureInitialized(soa.Self(), thread_class, true, true));
701
702  ArtField* contextClassLoader =
703      thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
704  CHECK(contextClassLoader != nullptr);
705
706  // We can't run in a transaction yet.
707  contextClassLoader->SetObject<false>(
708      soa.Self()->GetPeer(),
709      soa.Decode<mirror::ClassLoader>(system_class_loader.get()).Ptr());
710
711  return env->NewGlobalRef(system_class_loader.get());
712}
713
714std::string Runtime::GetPatchoatExecutable() const {
715  if (!patchoat_executable_.empty()) {
716    return patchoat_executable_;
717  }
718  std::string patchoat_executable(GetAndroidRoot());
719  patchoat_executable += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
720  return patchoat_executable;
721}
722
723std::string Runtime::GetCompilerExecutable() const {
724  if (!compiler_executable_.empty()) {
725    return compiler_executable_;
726  }
727  std::string compiler_executable(GetAndroidRoot());
728  compiler_executable += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
729  return compiler_executable;
730}
731
732bool Runtime::Start() {
733  VLOG(startup) << "Runtime::Start entering";
734
735  CHECK(!no_sig_chain_) << "A started runtime should have sig chain enabled";
736
737  // If a debug host build, disable ptrace restriction for debugging and test timeout thread dump.
738  // Only 64-bit as prctl() may fail in 32 bit userspace on a 64-bit kernel.
739#if defined(__linux__) && !defined(ART_TARGET_ANDROID) && defined(__x86_64__)
740  if (kIsDebugBuild) {
741    CHECK_EQ(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY), 0);
742  }
743#endif
744
745  // Restore main thread state to kNative as expected by native code.
746  Thread* self = Thread::Current();
747
748  self->TransitionFromRunnableToSuspended(kNative);
749
750  started_ = true;
751
752  if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
753    ScopedObjectAccess soa(self);
754    StackHandleScope<2> hs(soa.Self());
755
756    auto class_class(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
757    auto field_class(hs.NewHandle<mirror::Class>(mirror::Field::StaticClass()));
758
759    class_linker_->EnsureInitialized(soa.Self(), class_class, true, true);
760    // Field class is needed for register_java_net_InetAddress in libcore, b/28153851.
761    class_linker_->EnsureInitialized(soa.Self(), field_class, true, true);
762  }
763
764  // InitNativeMethods needs to be after started_ so that the classes
765  // it touches will have methods linked to the oat file if necessary.
766  {
767    ScopedTrace trace2("InitNativeMethods");
768    InitNativeMethods();
769  }
770
771  // IntializeIntrinsics needs to be called after the WellKnownClasses::Init in InitNativeMethods
772  // because in checking the invocation types of intrinsic methods ArtMethod::GetInvokeType()
773  // needs the SignaturePolymorphic annotation class which is initialized in WellKnownClasses::Init.
774  InitializeIntrinsics();
775
776  // Initialize well known thread group values that may be accessed threads while attaching.
777  InitThreadGroups(self);
778
779  Thread::FinishStartup();
780
781  // Create the JIT either if we have to use JIT compilation or save profiling info. This is
782  // done after FinishStartup as the JIT pool needs Java thread peers, which require the main
783  // ThreadGroup to exist.
784  //
785  // TODO(calin): We use the JIT class as a proxy for JIT compilation and for
786  // recoding profiles. Maybe we should consider changing the name to be more clear it's
787  // not only about compiling. b/28295073.
788  if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) {
789    std::string error_msg;
790    if (!IsZygote()) {
791    // If we are the zygote then we need to wait until after forking to create the code cache
792    // due to SELinux restrictions on r/w/x memory regions.
793      CreateJit();
794    } else if (jit_options_->UseJitCompilation()) {
795      if (!jit::Jit::LoadCompilerLibrary(&error_msg)) {
796        // Try to load compiler pre zygote to reduce PSS. b/27744947
797        LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg;
798      }
799    }
800  }
801
802  // Send the start phase event. We have to wait till here as this is when the main thread peer
803  // has just been generated, important root clinits have been run and JNI is completely functional.
804  {
805    ScopedObjectAccess soa(self);
806    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kStart);
807  }
808
809  system_class_loader_ = CreateSystemClassLoader(this);
810
811  if (!is_zygote_) {
812    if (is_native_bridge_loaded_) {
813      PreInitializeNativeBridge(".");
814    }
815    NativeBridgeAction action = force_native_bridge_
816        ? NativeBridgeAction::kInitialize
817        : NativeBridgeAction::kUnload;
818    InitNonZygoteOrPostFork(self->GetJniEnv(),
819                            /* is_system_server */ false,
820                            action,
821                            GetInstructionSetString(kRuntimeISA));
822  }
823
824  // Send the initialized phase event. Send it before starting daemons, as otherwise
825  // sending thread events becomes complicated.
826  {
827    ScopedObjectAccess soa(self);
828    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInit);
829  }
830
831  StartDaemonThreads();
832
833  {
834    ScopedObjectAccess soa(self);
835    self->GetJniEnv()->AssertLocalsEmpty();
836  }
837
838  VLOG(startup) << "Runtime::Start exiting";
839  finished_starting_ = true;
840
841  if (trace_config_.get() != nullptr && trace_config_->trace_file != "") {
842    ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart);
843    Trace::Start(trace_config_->trace_file.c_str(),
844                 -1,
845                 static_cast<int>(trace_config_->trace_file_size),
846                 0,
847                 trace_config_->trace_output_mode,
848                 trace_config_->trace_mode,
849                 0);
850  }
851
852  // In case we have a profile path passed as a command line argument,
853  // register the current class path for profiling now. Note that we cannot do
854  // this before we create the JIT and having it here is the most convenient way.
855  // This is used when testing profiles with dalvikvm command as there is no
856  // framework to register the dex files for profiling.
857  if (jit_.get() != nullptr && jit_options_->GetSaveProfilingInfo() &&
858      !jit_options_->GetProfileSaverOptions().GetProfilePath().empty()) {
859    std::vector<std::string> dex_filenames;
860    Split(class_path_string_, ':', &dex_filenames);
861    RegisterAppInfo(dex_filenames, jit_options_->GetProfileSaverOptions().GetProfilePath());
862  }
863
864  return true;
865}
866
867void Runtime::EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
868  DCHECK_GT(threads_being_born_, 0U);
869  threads_being_born_--;
870  if (shutting_down_started_ && threads_being_born_ == 0) {
871    shutdown_cond_->Broadcast(Thread::Current());
872  }
873}
874
875void Runtime::InitNonZygoteOrPostFork(
876    JNIEnv* env,
877    bool is_system_server,
878    NativeBridgeAction action,
879    const char* isa,
880    bool profile_system_server) {
881  is_zygote_ = false;
882
883  if (is_native_bridge_loaded_) {
884    switch (action) {
885      case NativeBridgeAction::kUnload:
886        UnloadNativeBridge();
887        is_native_bridge_loaded_ = false;
888        break;
889
890      case NativeBridgeAction::kInitialize:
891        InitializeNativeBridge(env, isa);
892        break;
893    }
894  }
895
896  // Create the thread pools.
897  heap_->CreateThreadPool();
898  // Reset the gc performance data at zygote fork so that the GCs
899  // before fork aren't attributed to an app.
900  heap_->ResetGcPerformanceInfo();
901
902  // We may want to collect profiling samples for system server, but we never want to JIT there.
903  if (is_system_server) {
904    jit_options_->SetUseJitCompilation(false);
905    jit_options_->SetSaveProfilingInfo(profile_system_server);
906    if (profile_system_server) {
907      jit_options_->SetWaitForJitNotificationsToSaveProfile(false);
908      VLOG(profiler) << "Enabling system server profiles";
909    }
910  }
911  if (!safe_mode_ &&
912      (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) &&
913      jit_ == nullptr) {
914    // Note that when running ART standalone (not zygote, nor zygote fork),
915    // the jit may have already been created.
916    CreateJit();
917  }
918
919  StartSignalCatcher();
920
921  // Start the JDWP thread. If the command-line debugger flags specified "suspend=y",
922  // this will pause the runtime (in the internal debugger implementation), so we probably want
923  // this to come last.
924  ScopedObjectAccess soa(Thread::Current());
925  GetRuntimeCallbacks()->StartDebugger();
926}
927
928void Runtime::StartSignalCatcher() {
929  if (!is_zygote_) {
930    signal_catcher_ = new SignalCatcher(stack_trace_file_, use_tombstoned_traces_);
931  }
932}
933
934bool Runtime::IsShuttingDown(Thread* self) {
935  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
936  return IsShuttingDownLocked();
937}
938
939void Runtime::StartDaemonThreads() {
940  ScopedTrace trace(__FUNCTION__);
941  VLOG(startup) << "Runtime::StartDaemonThreads entering";
942
943  Thread* self = Thread::Current();
944
945  // Must be in the kNative state for calling native methods.
946  CHECK_EQ(self->GetState(), kNative);
947
948  JNIEnv* env = self->GetJniEnv();
949  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
950                            WellKnownClasses::java_lang_Daemons_start);
951  if (env->ExceptionCheck()) {
952    env->ExceptionDescribe();
953    LOG(FATAL) << "Error starting java.lang.Daemons";
954  }
955
956  VLOG(startup) << "Runtime::StartDaemonThreads exiting";
957}
958
959// Attempts to open dex files from image(s). Given the image location, try to find the oat file
960// and open it to get the stored dex file. If the image is the first for a multi-image boot
961// classpath, go on and also open the other images.
962static bool OpenDexFilesFromImage(const std::string& image_location,
963                                  std::vector<std::unique_ptr<const DexFile>>* dex_files,
964                                  size_t* failures) {
965  DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
966
967  // Use a work-list approach, so that we can easily reuse the opening code.
968  std::vector<std::string> image_locations;
969  image_locations.push_back(image_location);
970
971  for (size_t index = 0; index < image_locations.size(); ++index) {
972    std::string system_filename;
973    bool has_system = false;
974    std::string cache_filename_unused;
975    bool dalvik_cache_exists_unused;
976    bool has_cache_unused;
977    bool is_global_cache_unused;
978    bool found_image = gc::space::ImageSpace::FindImageFilename(image_locations[index].c_str(),
979                                                                kRuntimeISA,
980                                                                &system_filename,
981                                                                &has_system,
982                                                                &cache_filename_unused,
983                                                                &dalvik_cache_exists_unused,
984                                                                &has_cache_unused,
985                                                                &is_global_cache_unused);
986
987    if (!found_image || !has_system) {
988      return false;
989    }
990
991    // We are falling back to non-executable use of the oat file because patching failed, presumably
992    // due to lack of space.
993    std::string vdex_filename =
994        ImageHeader::GetVdexLocationFromImageLocation(system_filename.c_str());
995    std::string oat_filename =
996        ImageHeader::GetOatLocationFromImageLocation(system_filename.c_str());
997    std::string oat_location =
998        ImageHeader::GetOatLocationFromImageLocation(image_locations[index].c_str());
999    // Note: in the multi-image case, the image location may end in ".jar," and not ".art." Handle
1000    //       that here.
1001    if (android::base::EndsWith(oat_location, ".jar")) {
1002      oat_location.replace(oat_location.length() - 3, 3, "oat");
1003    }
1004    std::string error_msg;
1005
1006    std::unique_ptr<VdexFile> vdex_file(VdexFile::Open(vdex_filename,
1007                                                       false /* writable */,
1008                                                       false /* low_4gb */,
1009                                                       false, /* unquicken */
1010                                                       &error_msg));
1011    if (vdex_file.get() == nullptr) {
1012      return false;
1013    }
1014
1015    std::unique_ptr<File> file(OS::OpenFileForReading(oat_filename.c_str()));
1016    if (file.get() == nullptr) {
1017      return false;
1018    }
1019    std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.get(),
1020                                                    false /* writable */,
1021                                                    false /* program_header_only */,
1022                                                    false /* low_4gb */,
1023                                                    &error_msg));
1024    if (elf_file.get() == nullptr) {
1025      return false;
1026    }
1027    std::unique_ptr<const OatFile> oat_file(
1028        OatFile::OpenWithElfFile(/* zip_fd */ -1,
1029                                 elf_file.release(),
1030                                 vdex_file.release(),
1031                                 oat_location,
1032                                 nullptr,
1033                                 &error_msg));
1034    if (oat_file == nullptr) {
1035      LOG(WARNING) << "Unable to use '" << oat_filename << "' because " << error_msg;
1036      return false;
1037    }
1038
1039    for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
1040      if (oat_dex_file == nullptr) {
1041        *failures += 1;
1042        continue;
1043      }
1044      std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
1045      if (dex_file.get() == nullptr) {
1046        *failures += 1;
1047      } else {
1048        dex_files->push_back(std::move(dex_file));
1049      }
1050    }
1051
1052    if (index == 0) {
1053      // First file. See if this is a multi-image environment, and if so, enqueue the other images.
1054      const OatHeader& boot_oat_header = oat_file->GetOatHeader();
1055      const char* boot_cp = boot_oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
1056      if (boot_cp != nullptr) {
1057        gc::space::ImageSpace::ExtractMultiImageLocations(image_locations[0],
1058                                                          boot_cp,
1059                                                          &image_locations);
1060      }
1061    }
1062
1063    Runtime::Current()->GetOatFileManager().RegisterOatFile(std::move(oat_file));
1064  }
1065  return true;
1066}
1067
1068
1069static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
1070                           const std::vector<std::string>& dex_locations,
1071                           const std::string& image_location,
1072                           std::vector<std::unique_ptr<const DexFile>>* dex_files) {
1073  DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
1074  size_t failure_count = 0;
1075  if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
1076    return failure_count;
1077  }
1078  const ArtDexFileLoader dex_file_loader;
1079  failure_count = 0;
1080  for (size_t i = 0; i < dex_filenames.size(); i++) {
1081    const char* dex_filename = dex_filenames[i].c_str();
1082    const char* dex_location = dex_locations[i].c_str();
1083    static constexpr bool kVerifyChecksum = true;
1084    std::string error_msg;
1085    if (!OS::FileExists(dex_filename)) {
1086      LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
1087      continue;
1088    }
1089    if (!dex_file_loader.Open(dex_filename,
1090                              dex_location,
1091                              Runtime::Current()->IsVerificationEnabled(),
1092                              kVerifyChecksum,
1093                              &error_msg,
1094                              dex_files)) {
1095      LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
1096      ++failure_count;
1097    }
1098  }
1099  return failure_count;
1100}
1101
1102void Runtime::SetSentinel(mirror::Object* sentinel) {
1103  CHECK(sentinel_.Read() == nullptr);
1104  CHECK(sentinel != nullptr);
1105  CHECK(!heap_->IsMovableObject(sentinel));
1106  sentinel_ = GcRoot<mirror::Object>(sentinel);
1107}
1108
1109bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
1110  // (b/30160149): protect subprocesses from modifications to LD_LIBRARY_PATH, etc.
1111  // Take a snapshot of the environment at the time the runtime was created, for use by Exec, etc.
1112  env_snapshot_.TakeSnapshot();
1113
1114  using Opt = RuntimeArgumentMap;
1115  Opt runtime_options(std::move(runtime_options_in));
1116  ScopedTrace trace(__FUNCTION__);
1117  CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
1118
1119  // Early override for logging output.
1120  if (runtime_options.Exists(Opt::UseStderrLogger)) {
1121    android::base::SetLogger(android::base::StderrLogger);
1122  }
1123
1124  MemMap::Init();
1125
1126  // Try to reserve a dedicated fault page. This is allocated for clobbered registers and sentinels.
1127  // If we cannot reserve it, log a warning.
1128  // Note: We allocate this first to have a good chance of grabbing the page. The address (0xebad..)
1129  //       is out-of-the-way enough that it should not collide with boot image mapping.
1130  // Note: Don't request an error message. That will lead to a maps dump in the case of failure,
1131  //       leading to logspam.
1132  {
1133    constexpr uintptr_t kSentinelAddr =
1134        RoundDown(static_cast<uintptr_t>(Context::kBadGprBase), kPageSize);
1135    protected_fault_page_.reset(MemMap::MapAnonymous("Sentinel fault page",
1136                                                     reinterpret_cast<uint8_t*>(kSentinelAddr),
1137                                                     kPageSize,
1138                                                     PROT_NONE,
1139                                                     /* low_4g */ true,
1140                                                     /* reuse */ false,
1141                                                     /* error_msg */ nullptr));
1142    if (protected_fault_page_ == nullptr) {
1143      LOG(WARNING) << "Could not reserve sentinel fault page";
1144    } else if (reinterpret_cast<uintptr_t>(protected_fault_page_->Begin()) != kSentinelAddr) {
1145      LOG(WARNING) << "Could not reserve sentinel fault page at the right address.";
1146      protected_fault_page_.reset();
1147    }
1148  }
1149
1150  VLOG(startup) << "Runtime::Init -verbose:startup enabled";
1151
1152  QuasiAtomic::Startup();
1153
1154  oat_file_manager_ = new OatFileManager;
1155
1156  Thread::SetSensitiveThreadHook(runtime_options.GetOrDefault(Opt::HookIsSensitiveThread));
1157  Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
1158                runtime_options.GetOrDefault(Opt::StackDumpLockProfThreshold));
1159
1160  boot_class_path_string_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
1161  class_path_string_ = runtime_options.ReleaseOrDefault(Opt::ClassPath);
1162  properties_ = runtime_options.ReleaseOrDefault(Opt::PropertiesList);
1163
1164  compiler_callbacks_ = runtime_options.GetOrDefault(Opt::CompilerCallbacksPtr);
1165  patchoat_executable_ = runtime_options.ReleaseOrDefault(Opt::PatchOat);
1166  must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
1167  is_zygote_ = runtime_options.Exists(Opt::Zygote);
1168  is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
1169  dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::Dex2Oat);
1170  image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
1171  dump_native_stack_on_sig_quit_ = runtime_options.GetOrDefault(Opt::DumpNativeStackOnSigQuit);
1172
1173  vfprintf_ = runtime_options.GetOrDefault(Opt::HookVfprintf);
1174  exit_ = runtime_options.GetOrDefault(Opt::HookExit);
1175  abort_ = runtime_options.GetOrDefault(Opt::HookAbort);
1176
1177  default_stack_size_ = runtime_options.GetOrDefault(Opt::StackSize);
1178  use_tombstoned_traces_ = runtime_options.GetOrDefault(Opt::UseTombstonedTraces);
1179#if !defined(ART_TARGET_ANDROID)
1180  CHECK(!use_tombstoned_traces_)
1181      << "-Xusetombstonedtraces is only supported in an Android environment";
1182#endif
1183  stack_trace_file_ = runtime_options.ReleaseOrDefault(Opt::StackTraceFile);
1184
1185  compiler_executable_ = runtime_options.ReleaseOrDefault(Opt::Compiler);
1186  compiler_options_ = runtime_options.ReleaseOrDefault(Opt::CompilerOptions);
1187  for (StringPiece option : Runtime::Current()->GetCompilerOptions()) {
1188    if (option.starts_with("--debuggable")) {
1189      SetJavaDebuggable(true);
1190      break;
1191    }
1192  }
1193  image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
1194  image_location_ = runtime_options.GetOrDefault(Opt::Image);
1195
1196  max_spins_before_thin_lock_inflation_ =
1197      runtime_options.GetOrDefault(Opt::MaxSpinsBeforeThinLockInflation);
1198
1199  monitor_list_ = new MonitorList;
1200  monitor_pool_ = MonitorPool::Create();
1201  thread_list_ = new ThreadList(runtime_options.GetOrDefault(Opt::ThreadSuspendTimeout));
1202  intern_table_ = new InternTable;
1203
1204  verify_ = runtime_options.GetOrDefault(Opt::Verify);
1205  allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
1206
1207  target_sdk_version_ = runtime_options.GetOrDefault(Opt::TargetSdkVersion);
1208
1209  // Check whether to enforce hidden API access checks. The checks are disabled
1210  // by default and we only enable them if:
1211  // (a) runtime was started with a flag that enables the checks, or
1212  // (b) Zygote forked a new process that is not exempt (see ZygoteHooks).
1213  bool do_hidden_api_checks = runtime_options.Exists(Opt::HiddenApiChecks);
1214  DCHECK(!is_zygote_ || !do_hidden_api_checks);
1215  // TODO pass the actual enforcement policy in, rather than just a single bit.
1216  // As is, we're encoding some logic here about which specific policy to use, which would be better
1217  // controlled by the framework.
1218  hidden_api_policy_ = do_hidden_api_checks
1219      ? hiddenapi::EnforcementPolicy::kDarkGreyAndBlackList
1220      : hiddenapi::EnforcementPolicy::kNoChecks;
1221
1222  no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
1223  force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
1224
1225  Split(runtime_options.GetOrDefault(Opt::CpuAbiList), ',', &cpu_abilist_);
1226
1227  fingerprint_ = runtime_options.ReleaseOrDefault(Opt::Fingerprint);
1228
1229  if (runtime_options.GetOrDefault(Opt::Interpret)) {
1230    GetInstrumentation()->ForceInterpretOnly();
1231  }
1232
1233  zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
1234  experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
1235  is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
1236  madvise_random_access_ = runtime_options.GetOrDefault(Opt::MadviseRandomAccess);
1237
1238  plugins_ = runtime_options.ReleaseOrDefault(Opt::Plugins);
1239  agent_specs_ = runtime_options.ReleaseOrDefault(Opt::AgentPath);
1240  // TODO Add back in -agentlib
1241  // for (auto lib : runtime_options.ReleaseOrDefault(Opt::AgentLib)) {
1242  //   agents_.push_back(lib);
1243  // }
1244
1245  float foreground_heap_growth_multiplier;
1246  if (is_low_memory_mode_ && !runtime_options.Exists(Opt::ForegroundHeapGrowthMultiplier)) {
1247    // If low memory mode, use 1.0 as the multiplier by default.
1248    foreground_heap_growth_multiplier = 1.0f;
1249  } else {
1250    foreground_heap_growth_multiplier =
1251        runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier) +
1252            kExtraDefaultHeapGrowthMultiplier;
1253  }
1254  XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
1255  heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
1256                       runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
1257                       runtime_options.GetOrDefault(Opt::HeapMinFree),
1258                       runtime_options.GetOrDefault(Opt::HeapMaxFree),
1259                       runtime_options.GetOrDefault(Opt::HeapTargetUtilization),
1260                       foreground_heap_growth_multiplier,
1261                       runtime_options.GetOrDefault(Opt::MemoryMaximumSize),
1262                       runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
1263                       runtime_options.GetOrDefault(Opt::Image),
1264                       runtime_options.GetOrDefault(Opt::ImageInstructionSet),
1265                       // Override the collector type to CC if the read barrier config.
1266                       kUseReadBarrier ? gc::kCollectorTypeCC : xgc_option.collector_type_,
1267                       kUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
1268                                       : runtime_options.GetOrDefault(Opt::BackgroundGc),
1269                       runtime_options.GetOrDefault(Opt::LargeObjectSpace),
1270                       runtime_options.GetOrDefault(Opt::LargeObjectThreshold),
1271                       runtime_options.GetOrDefault(Opt::ParallelGCThreads),
1272                       runtime_options.GetOrDefault(Opt::ConcGCThreads),
1273                       runtime_options.Exists(Opt::LowMemoryMode),
1274                       runtime_options.GetOrDefault(Opt::LongPauseLogThreshold),
1275                       runtime_options.GetOrDefault(Opt::LongGCLogThreshold),
1276                       runtime_options.Exists(Opt::IgnoreMaxFootprint),
1277                       runtime_options.GetOrDefault(Opt::UseTLAB),
1278                       xgc_option.verify_pre_gc_heap_,
1279                       xgc_option.verify_pre_sweeping_heap_,
1280                       xgc_option.verify_post_gc_heap_,
1281                       xgc_option.verify_pre_gc_rosalloc_,
1282                       xgc_option.verify_pre_sweeping_rosalloc_,
1283                       xgc_option.verify_post_gc_rosalloc_,
1284                       xgc_option.gcstress_,
1285                       xgc_option.measure_,
1286                       runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
1287                       runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
1288
1289  if (!heap_->HasBootImageSpace() && !allow_dex_file_fallback_) {
1290    LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
1291    return false;
1292  }
1293
1294  dump_gc_performance_on_shutdown_ = runtime_options.Exists(Opt::DumpGCPerformanceOnShutdown);
1295
1296  jdwp_options_ = runtime_options.GetOrDefault(Opt::JdwpOptions);
1297  jdwp_provider_ = runtime_options.GetOrDefault(Opt::JdwpProvider);
1298  switch (jdwp_provider_) {
1299    case JdwpProvider::kNone: {
1300      VLOG(jdwp) << "Disabling all JDWP support.";
1301      if (!jdwp_options_.empty()) {
1302        bool has_transport = jdwp_options_.find("transport") != std::string::npos;
1303        const char* transport_internal = !has_transport ? "transport=dt_android_adb," : "";
1304        std::string adb_connection_args =
1305            std::string("  -XjdwpProvider:adbconnection -XjdwpOptions:") + jdwp_options_;
1306        LOG(WARNING) << "Jdwp options given when jdwp is disabled! You probably want to enable "
1307                     << "jdwp with one of:" << std::endl
1308                     << "  -XjdwpProvider:internal "
1309                     << "-XjdwpOptions:" << transport_internal << jdwp_options_ << std::endl
1310                     << "  -Xplugin:libopenjdkjvmti" << (kIsDebugBuild ? "d" : "") << ".so "
1311                     << "-agentpath:libjdwp.so=" << jdwp_options_ << std::endl
1312                     << (has_transport ? "" : adb_connection_args);
1313      }
1314      break;
1315    }
1316    case JdwpProvider::kInternal: {
1317      if (runtime_options.Exists(Opt::JdwpOptions)) {
1318        JDWP::JdwpOptions ops;
1319        if (!JDWP::ParseJdwpOptions(runtime_options.GetOrDefault(Opt::JdwpOptions), &ops)) {
1320          LOG(ERROR) << "failed to parse jdwp options!";
1321          return false;
1322        }
1323        Dbg::ConfigureJdwp(ops);
1324      }
1325      break;
1326    }
1327    case JdwpProvider::kAdbConnection: {
1328      constexpr const char* plugin_name = kIsDebugBuild ? "libadbconnectiond.so"
1329                                                        : "libadbconnection.so";
1330      plugins_.push_back(Plugin::Create(plugin_name));
1331    }
1332  }
1333  callbacks_->AddThreadLifecycleCallback(Dbg::GetThreadLifecycleCallback());
1334  callbacks_->AddClassLoadCallback(Dbg::GetClassLoadCallback());
1335
1336  jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
1337  if (IsAotCompiler()) {
1338    // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
1339    // this case.
1340    // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
1341    // null and we don't create the jit.
1342    jit_options_->SetUseJitCompilation(false);
1343    jit_options_->SetSaveProfilingInfo(false);
1344  }
1345
1346  // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
1347  // can't be trimmed as easily.
1348  const bool use_malloc = IsAotCompiler();
1349  arena_pool_.reset(new ArenaPool(use_malloc, /* low_4gb */ false));
1350  jit_arena_pool_.reset(
1351      new ArenaPool(/* use_malloc */ false, /* low_4gb */ false, "CompilerMetadata"));
1352
1353  if (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
1354    // 4gb, no malloc. Explanation in header.
1355    low_4gb_arena_pool_.reset(new ArenaPool(/* use_malloc */ false, /* low_4gb */ true));
1356  }
1357  linear_alloc_.reset(CreateLinearAlloc());
1358
1359  BlockSignals();
1360  InitPlatformSignalHandlers();
1361
1362  // Change the implicit checks flags based on runtime architecture.
1363  switch (kRuntimeISA) {
1364    case InstructionSet::kArm:
1365    case InstructionSet::kThumb2:
1366    case InstructionSet::kX86:
1367    case InstructionSet::kArm64:
1368    case InstructionSet::kX86_64:
1369    case InstructionSet::kMips:
1370    case InstructionSet::kMips64:
1371      implicit_null_checks_ = true;
1372      // Installing stack protection does not play well with valgrind.
1373      implicit_so_checks_ = !(RUNNING_ON_MEMORY_TOOL && kMemoryToolIsValgrind);
1374      break;
1375    default:
1376      // Keep the defaults.
1377      break;
1378  }
1379
1380  if (!no_sig_chain_) {
1381    // Dex2Oat's Runtime does not need the signal chain or the fault handler.
1382    if (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_) {
1383      fault_manager.Init();
1384
1385      // These need to be in a specific order.  The null point check handler must be
1386      // after the suspend check and stack overflow check handlers.
1387      //
1388      // Note: the instances attach themselves to the fault manager and are handled by it. The manager
1389      //       will delete the instance on Shutdown().
1390      if (implicit_suspend_checks_) {
1391        new SuspensionHandler(&fault_manager);
1392      }
1393
1394      if (implicit_so_checks_) {
1395        new StackOverflowHandler(&fault_manager);
1396      }
1397
1398      if (implicit_null_checks_) {
1399        new NullPointerHandler(&fault_manager);
1400      }
1401
1402      if (kEnableJavaStackTraceHandler) {
1403        new JavaStackTraceHandler(&fault_manager);
1404      }
1405    }
1406  }
1407
1408  std::string error_msg;
1409  java_vm_ = JavaVMExt::Create(this, runtime_options, &error_msg);
1410  if (java_vm_.get() == nullptr) {
1411    LOG(ERROR) << "Could not initialize JavaVMExt: " << error_msg;
1412    return false;
1413  }
1414
1415  // Add the JniEnv handler.
1416  // TODO Refactor this stuff.
1417  java_vm_->AddEnvironmentHook(JNIEnvExt::GetEnvHandler);
1418
1419  Thread::Startup();
1420
1421  // ClassLinker needs an attached thread, but we can't fully attach a thread without creating
1422  // objects. We can't supply a thread group yet; it will be fixed later. Since we are the main
1423  // thread, we do not get a java peer.
1424  Thread* self = Thread::Attach("main", false, nullptr, false);
1425  CHECK_EQ(self->GetThreadId(), ThreadList::kMainThreadId);
1426  CHECK(self != nullptr);
1427
1428  self->SetCanCallIntoJava(!IsAotCompiler());
1429
1430  // Set us to runnable so tools using a runtime can allocate and GC by default
1431  self->TransitionFromSuspendedToRunnable();
1432
1433  // Now we're attached, we can take the heap locks and validate the heap.
1434  GetHeap()->EnableObjectValidation();
1435
1436  CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
1437  if (UNLIKELY(IsAotCompiler())) {
1438    class_linker_ = new AotClassLinker(intern_table_);
1439  } else {
1440    class_linker_ = new ClassLinker(intern_table_);
1441  }
1442  if (GetHeap()->HasBootImageSpace()) {
1443    bool result = class_linker_->InitFromBootImage(&error_msg);
1444    if (!result) {
1445      LOG(ERROR) << "Could not initialize from image: " << error_msg;
1446      return false;
1447    }
1448    if (kIsDebugBuild) {
1449      for (auto image_space : GetHeap()->GetBootImageSpaces()) {
1450        image_space->VerifyImageAllocations();
1451      }
1452    }
1453    if (boot_class_path_string_.empty()) {
1454      // The bootclasspath is not explicitly specified: construct it from the loaded dex files.
1455      const std::vector<const DexFile*>& boot_class_path = GetClassLinker()->GetBootClassPath();
1456      std::vector<std::string> dex_locations;
1457      dex_locations.reserve(boot_class_path.size());
1458      for (const DexFile* dex_file : boot_class_path) {
1459        dex_locations.push_back(dex_file->GetLocation());
1460      }
1461      boot_class_path_string_ = android::base::Join(dex_locations, ':');
1462    }
1463    {
1464      ScopedTrace trace2("AddImageStringsToTable");
1465      GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces());
1466    }
1467    if (IsJavaDebuggable()) {
1468      // Now that we have loaded the boot image, deoptimize its methods if we are running
1469      // debuggable, as the code may have been compiled non-debuggable.
1470      DeoptimizeBootImage();
1471    }
1472  } else {
1473    std::vector<std::string> dex_filenames;
1474    Split(boot_class_path_string_, ':', &dex_filenames);
1475
1476    std::vector<std::string> dex_locations;
1477    if (!runtime_options.Exists(Opt::BootClassPathLocations)) {
1478      dex_locations = dex_filenames;
1479    } else {
1480      dex_locations = runtime_options.GetOrDefault(Opt::BootClassPathLocations);
1481      CHECK_EQ(dex_filenames.size(), dex_locations.size());
1482    }
1483
1484    std::vector<std::unique_ptr<const DexFile>> boot_class_path;
1485    if (runtime_options.Exists(Opt::BootClassPathDexList)) {
1486      boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
1487    } else {
1488      OpenDexFiles(dex_filenames,
1489                   dex_locations,
1490                   runtime_options.GetOrDefault(Opt::Image),
1491                   &boot_class_path);
1492    }
1493    instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
1494    if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
1495      LOG(ERROR) << "Could not initialize without image: " << error_msg;
1496      return false;
1497    }
1498
1499    // TODO: Should we move the following to InitWithoutImage?
1500    SetInstructionSet(instruction_set_);
1501    for (uint32_t i = 0; i < kCalleeSaveSize; i++) {
1502      CalleeSaveType type = CalleeSaveType(i);
1503      if (!HasCalleeSaveMethod(type)) {
1504        SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
1505      }
1506    }
1507  }
1508
1509  CHECK(class_linker_ != nullptr);
1510
1511  verifier::MethodVerifier::Init();
1512
1513  if (runtime_options.Exists(Opt::MethodTrace)) {
1514    trace_config_.reset(new TraceConfig());
1515    trace_config_->trace_file = runtime_options.ReleaseOrDefault(Opt::MethodTraceFile);
1516    trace_config_->trace_file_size = runtime_options.ReleaseOrDefault(Opt::MethodTraceFileSize);
1517    trace_config_->trace_mode = Trace::TraceMode::kMethodTracing;
1518    trace_config_->trace_output_mode = runtime_options.Exists(Opt::MethodTraceStreaming) ?
1519        Trace::TraceOutputMode::kStreaming :
1520        Trace::TraceOutputMode::kFile;
1521  }
1522
1523  // TODO: move this to just be an Trace::Start argument
1524  Trace::SetDefaultClockSource(runtime_options.GetOrDefault(Opt::ProfileClock));
1525
1526  // Pre-allocate an OutOfMemoryError for the double-OOME case.
1527  self->ThrowNewException("Ljava/lang/OutOfMemoryError;",
1528                          "OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
1529                          "no stack trace available");
1530  pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException());
1531  self->ClearException();
1532
1533  // Pre-allocate a NoClassDefFoundError for the common case of failing to find a system class
1534  // ahead of checking the application's class loader.
1535  self->ThrowNewException("Ljava/lang/NoClassDefFoundError;",
1536                          "Class not found using the boot class loader; no stack trace available");
1537  pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException());
1538  self->ClearException();
1539
1540  // Runtime initialization is largely done now.
1541  // We load plugins first since that can modify the runtime state slightly.
1542  // Load all plugins
1543  for (auto& plugin : plugins_) {
1544    std::string err;
1545    if (!plugin.Load(&err)) {
1546      LOG(FATAL) << plugin << " failed to load: " << err;
1547    }
1548  }
1549
1550  // Look for a native bridge.
1551  //
1552  // The intended flow here is, in the case of a running system:
1553  //
1554  // Runtime::Init() (zygote):
1555  //   LoadNativeBridge -> dlopen from cmd line parameter.
1556  //  |
1557  //  V
1558  // Runtime::Start() (zygote):
1559  //   No-op wrt native bridge.
1560  //  |
1561  //  | start app
1562  //  V
1563  // DidForkFromZygote(action)
1564  //   action = kUnload -> dlclose native bridge.
1565  //   action = kInitialize -> initialize library
1566  //
1567  //
1568  // The intended flow here is, in the case of a simple dalvikvm call:
1569  //
1570  // Runtime::Init():
1571  //   LoadNativeBridge -> dlopen from cmd line parameter.
1572  //  |
1573  //  V
1574  // Runtime::Start():
1575  //   DidForkFromZygote(kInitialize) -> try to initialize any native bridge given.
1576  //   No-op wrt native bridge.
1577  {
1578    std::string native_bridge_file_name = runtime_options.ReleaseOrDefault(Opt::NativeBridge);
1579    is_native_bridge_loaded_ = LoadNativeBridge(native_bridge_file_name);
1580  }
1581
1582  // Startup agents
1583  // TODO Maybe we should start a new thread to run these on. Investigate RI behavior more.
1584  for (auto& agent_spec : agent_specs_) {
1585    // TODO Check err
1586    int res = 0;
1587    std::string err = "";
1588    ti::LoadError error;
1589    std::unique_ptr<ti::Agent> agent = agent_spec.Load(&res, &error, &err);
1590
1591    if (agent != nullptr) {
1592      agents_.push_back(std::move(agent));
1593      continue;
1594    }
1595
1596    switch (error) {
1597      case ti::LoadError::kInitializationError:
1598        LOG(FATAL) << "Unable to initialize agent!";
1599        UNREACHABLE();
1600
1601      case ti::LoadError::kLoadingError:
1602        LOG(ERROR) << "Unable to load an agent: " << err;
1603        continue;
1604
1605      case ti::LoadError::kNoError:
1606        break;
1607    }
1608    LOG(FATAL) << "Unreachable";
1609    UNREACHABLE();
1610  }
1611  {
1612    ScopedObjectAccess soa(self);
1613    callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInitialAgents);
1614  }
1615
1616  VLOG(startup) << "Runtime::Init exiting";
1617
1618  // Set OnlyUseSystemOatFiles only after boot classpath has been set up.
1619  if (runtime_options.Exists(Opt::OnlyUseSystemOatFiles)) {
1620    oat_file_manager_->SetOnlyUseSystemOatFiles();
1621  }
1622
1623  return true;
1624}
1625
1626static bool EnsureJvmtiPlugin(Runtime* runtime,
1627                              std::vector<Plugin>* plugins,
1628                              std::string* error_msg) {
1629  constexpr const char* plugin_name = kIsDebugBuild ? "libopenjdkjvmtid.so" : "libopenjdkjvmti.so";
1630
1631  // Is the plugin already loaded?
1632  for (const Plugin& p : *plugins) {
1633    if (p.GetLibrary() == plugin_name) {
1634      return true;
1635    }
1636  }
1637
1638  // TODO Rename Dbg::IsJdwpAllowed is IsDebuggingAllowed.
1639  DCHECK(Dbg::IsJdwpAllowed() || !runtime->IsJavaDebuggable())
1640      << "Being debuggable requires that jdwp (i.e. debugging) is allowed.";
1641  // Is the process debuggable? Otherwise, do not attempt to load the plugin unless we are
1642  // specifically allowed.
1643  if (!Dbg::IsJdwpAllowed()) {
1644    *error_msg = "Process is not allowed to load openjdkjvmti plugin. Process must be debuggable";
1645    return false;
1646  }
1647
1648  Plugin new_plugin = Plugin::Create(plugin_name);
1649
1650  if (!new_plugin.Load(error_msg)) {
1651    return false;
1652  }
1653
1654  plugins->push_back(std::move(new_plugin));
1655  return true;
1656}
1657
1658// Attach a new agent and add it to the list of runtime agents
1659//
1660// TODO: once we decide on the threading model for agents,
1661//   revisit this and make sure we're doing this on the right thread
1662//   (and we synchronize access to any shared data structures like "agents_")
1663//
1664void Runtime::AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader) {
1665  std::string error_msg;
1666  if (!EnsureJvmtiPlugin(this, &plugins_, &error_msg)) {
1667    LOG(WARNING) << "Could not load plugin: " << error_msg;
1668    ScopedObjectAccess soa(Thread::Current());
1669    ThrowIOException("%s", error_msg.c_str());
1670    return;
1671  }
1672
1673  ti::AgentSpec agent_spec(agent_arg);
1674
1675  int res = 0;
1676  ti::LoadError error;
1677  std::unique_ptr<ti::Agent> agent = agent_spec.Attach(env, class_loader, &res, &error, &error_msg);
1678
1679  if (agent != nullptr) {
1680    agents_.push_back(std::move(agent));
1681  } else {
1682    LOG(WARNING) << "Agent attach failed (result=" << error << ") : " << error_msg;
1683    ScopedObjectAccess soa(Thread::Current());
1684    ThrowIOException("%s", error_msg.c_str());
1685  }
1686}
1687
1688void Runtime::InitNativeMethods() {
1689  VLOG(startup) << "Runtime::InitNativeMethods entering";
1690  Thread* self = Thread::Current();
1691  JNIEnv* env = self->GetJniEnv();
1692
1693  // Must be in the kNative state for calling native methods (JNI_OnLoad code).
1694  CHECK_EQ(self->GetState(), kNative);
1695
1696  // Set up the native methods provided by the runtime itself.
1697  RegisterRuntimeNativeMethods(env);
1698
1699  // Initialize classes used in JNI. The initialization requires runtime native
1700  // methods to be loaded first.
1701  WellKnownClasses::Init(env);
1702
1703  // Then set up libjavacore / libopenjdk, which are just a regular JNI libraries with
1704  // a regular JNI_OnLoad. Most JNI libraries can just use System.loadLibrary, but
1705  // libcore can't because it's the library that implements System.loadLibrary!
1706  {
1707    std::string error_msg;
1708    if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, &error_msg)) {
1709      LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << error_msg;
1710    }
1711  }
1712  {
1713    constexpr const char* kOpenJdkLibrary = kIsDebugBuild
1714                                                ? "libopenjdkd.so"
1715                                                : "libopenjdk.so";
1716    std::string error_msg;
1717    if (!java_vm_->LoadNativeLibrary(env, kOpenJdkLibrary, nullptr, &error_msg)) {
1718      LOG(FATAL) << "LoadNativeLibrary failed for \"" << kOpenJdkLibrary << "\": " << error_msg;
1719    }
1720  }
1721
1722  // Initialize well known classes that may invoke runtime native methods.
1723  WellKnownClasses::LateInit(env);
1724
1725  VLOG(startup) << "Runtime::InitNativeMethods exiting";
1726}
1727
1728void Runtime::ReclaimArenaPoolMemory() {
1729  arena_pool_->LockReclaimMemory();
1730}
1731
1732void Runtime::InitThreadGroups(Thread* self) {
1733  JNIEnvExt* env = self->GetJniEnv();
1734  ScopedJniEnvLocalRefState env_state(env);
1735  main_thread_group_ =
1736      env->NewGlobalRef(env->GetStaticObjectField(
1737          WellKnownClasses::java_lang_ThreadGroup,
1738          WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
1739  CHECK(main_thread_group_ != nullptr || IsAotCompiler());
1740  system_thread_group_ =
1741      env->NewGlobalRef(env->GetStaticObjectField(
1742          WellKnownClasses::java_lang_ThreadGroup,
1743          WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
1744  CHECK(system_thread_group_ != nullptr || IsAotCompiler());
1745}
1746
1747jobject Runtime::GetMainThreadGroup() const {
1748  CHECK(main_thread_group_ != nullptr || IsAotCompiler());
1749  return main_thread_group_;
1750}
1751
1752jobject Runtime::GetSystemThreadGroup() const {
1753  CHECK(system_thread_group_ != nullptr || IsAotCompiler());
1754  return system_thread_group_;
1755}
1756
1757jobject Runtime::GetSystemClassLoader() const {
1758  CHECK(system_class_loader_ != nullptr || IsAotCompiler());
1759  return system_class_loader_;
1760}
1761
1762void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
1763  register_dalvik_system_DexFile(env);
1764  register_dalvik_system_VMDebug(env);
1765  register_dalvik_system_VMRuntime(env);
1766  register_dalvik_system_VMStack(env);
1767  register_dalvik_system_ZygoteHooks(env);
1768  register_java_lang_Class(env);
1769  register_java_lang_Object(env);
1770  register_java_lang_invoke_MethodHandleImpl(env);
1771  register_java_lang_ref_FinalizerReference(env);
1772  register_java_lang_reflect_Array(env);
1773  register_java_lang_reflect_Constructor(env);
1774  register_java_lang_reflect_Executable(env);
1775  register_java_lang_reflect_Field(env);
1776  register_java_lang_reflect_Method(env);
1777  register_java_lang_reflect_Parameter(env);
1778  register_java_lang_reflect_Proxy(env);
1779  register_java_lang_ref_Reference(env);
1780  register_java_lang_String(env);
1781  register_java_lang_StringFactory(env);
1782  register_java_lang_System(env);
1783  register_java_lang_Thread(env);
1784  register_java_lang_Throwable(env);
1785  register_java_lang_VMClassLoader(env);
1786  register_java_util_concurrent_atomic_AtomicLong(env);
1787  register_libcore_util_CharsetUtils(env);
1788  register_org_apache_harmony_dalvik_ddmc_DdmServer(env);
1789  register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(env);
1790  register_sun_misc_Unsafe(env);
1791}
1792
1793std::ostream& operator<<(std::ostream& os, const DeoptimizationKind& kind) {
1794  os << GetDeoptimizationKindName(kind);
1795  return os;
1796}
1797
1798void Runtime::DumpDeoptimizations(std::ostream& os) {
1799  for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
1800    if (deoptimization_counts_[i] != 0) {
1801      os << "Number of "
1802         << GetDeoptimizationKindName(static_cast<DeoptimizationKind>(i))
1803         << " deoptimizations: "
1804         << deoptimization_counts_[i]
1805         << "\n";
1806    }
1807  }
1808}
1809
1810void Runtime::DumpForSigQuit(std::ostream& os) {
1811  GetClassLinker()->DumpForSigQuit(os);
1812  GetInternTable()->DumpForSigQuit(os);
1813  GetJavaVM()->DumpForSigQuit(os);
1814  GetHeap()->DumpForSigQuit(os);
1815  oat_file_manager_->DumpForSigQuit(os);
1816  if (GetJit() != nullptr) {
1817    GetJit()->DumpForSigQuit(os);
1818  } else {
1819    os << "Running non JIT\n";
1820  }
1821  DumpDeoptimizations(os);
1822  TrackedAllocators::Dump(os);
1823  os << "\n";
1824
1825  thread_list_->DumpForSigQuit(os);
1826  BaseMutex::DumpAll(os);
1827
1828  // Inform anyone else who is interested in SigQuit.
1829  {
1830    ScopedObjectAccess soa(Thread::Current());
1831    callbacks_->SigQuit();
1832  }
1833}
1834
1835void Runtime::DumpLockHolders(std::ostream& os) {
1836  uint64_t mutator_lock_owner = Locks::mutator_lock_->GetExclusiveOwnerTid();
1837  pid_t thread_list_lock_owner = GetThreadList()->GetLockOwner();
1838  pid_t classes_lock_owner = GetClassLinker()->GetClassesLockOwner();
1839  pid_t dex_lock_owner = GetClassLinker()->GetDexLockOwner();
1840  if ((thread_list_lock_owner | classes_lock_owner | dex_lock_owner) != 0) {
1841    os << "Mutator lock exclusive owner tid: " << mutator_lock_owner << "\n"
1842       << "ThreadList lock owner tid: " << thread_list_lock_owner << "\n"
1843       << "ClassLinker classes lock owner tid: " << classes_lock_owner << "\n"
1844       << "ClassLinker dex lock owner tid: " << dex_lock_owner << "\n";
1845  }
1846}
1847
1848void Runtime::SetStatsEnabled(bool new_state) {
1849  Thread* self = Thread::Current();
1850  MutexLock mu(self, *Locks::instrument_entrypoints_lock_);
1851  if (new_state == true) {
1852    GetStats()->Clear(~0);
1853    // TODO: wouldn't it make more sense to clear _all_ threads' stats?
1854    self->GetStats()->Clear(~0);
1855    if (stats_enabled_ != new_state) {
1856      GetInstrumentation()->InstrumentQuickAllocEntryPointsLocked();
1857    }
1858  } else if (stats_enabled_ != new_state) {
1859    GetInstrumentation()->UninstrumentQuickAllocEntryPointsLocked();
1860  }
1861  stats_enabled_ = new_state;
1862}
1863
1864void Runtime::ResetStats(int kinds) {
1865  GetStats()->Clear(kinds & 0xffff);
1866  // TODO: wouldn't it make more sense to clear _all_ threads' stats?
1867  Thread::Current()->GetStats()->Clear(kinds >> 16);
1868}
1869
1870int32_t Runtime::GetStat(int kind) {
1871  RuntimeStats* stats;
1872  if (kind < (1<<16)) {
1873    stats = GetStats();
1874  } else {
1875    stats = Thread::Current()->GetStats();
1876    kind >>= 16;
1877  }
1878  switch (kind) {
1879  case KIND_ALLOCATED_OBJECTS:
1880    return stats->allocated_objects;
1881  case KIND_ALLOCATED_BYTES:
1882    return stats->allocated_bytes;
1883  case KIND_FREED_OBJECTS:
1884    return stats->freed_objects;
1885  case KIND_FREED_BYTES:
1886    return stats->freed_bytes;
1887  case KIND_GC_INVOCATIONS:
1888    return stats->gc_for_alloc_count;
1889  case KIND_CLASS_INIT_COUNT:
1890    return stats->class_init_count;
1891  case KIND_CLASS_INIT_TIME:
1892    // Convert ns to us, reduce to 32 bits.
1893    return static_cast<int>(stats->class_init_time_ns / 1000);
1894  case KIND_EXT_ALLOCATED_OBJECTS:
1895  case KIND_EXT_ALLOCATED_BYTES:
1896  case KIND_EXT_FREED_OBJECTS:
1897  case KIND_EXT_FREED_BYTES:
1898    return 0;  // backward compatibility
1899  default:
1900    LOG(FATAL) << "Unknown statistic " << kind;
1901    return -1;  // unreachable
1902  }
1903}
1904
1905void Runtime::BlockSignals() {
1906  SignalSet signals;
1907  signals.Add(SIGPIPE);
1908  // SIGQUIT is used to dump the runtime's state (including stack traces).
1909  signals.Add(SIGQUIT);
1910  // SIGUSR1 is used to initiate a GC.
1911  signals.Add(SIGUSR1);
1912  signals.Block();
1913}
1914
1915bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
1916                                  bool create_peer) {
1917  ScopedTrace trace(__FUNCTION__);
1918  Thread* self = Thread::Attach(thread_name, as_daemon, thread_group, create_peer);
1919  // Run ThreadGroup.add to notify the group that this thread is now started.
1920  if (self != nullptr && create_peer && !IsAotCompiler()) {
1921    ScopedObjectAccess soa(self);
1922    self->NotifyThreadGroup(soa, thread_group);
1923  }
1924  return self != nullptr;
1925}
1926
1927void Runtime::DetachCurrentThread() {
1928  ScopedTrace trace(__FUNCTION__);
1929  Thread* self = Thread::Current();
1930  if (self == nullptr) {
1931    LOG(FATAL) << "attempting to detach thread that is not attached";
1932  }
1933  if (self->HasManagedStack()) {
1934    LOG(FATAL) << *Thread::Current() << " attempting to detach while still running code";
1935  }
1936  thread_list_->Unregister(self);
1937}
1938
1939mirror::Throwable* Runtime::GetPreAllocatedOutOfMemoryError() {
1940  mirror::Throwable* oome = pre_allocated_OutOfMemoryError_.Read();
1941  if (oome == nullptr) {
1942    LOG(ERROR) << "Failed to return pre-allocated OOME";
1943  }
1944  return oome;
1945}
1946
1947mirror::Throwable* Runtime::GetPreAllocatedNoClassDefFoundError() {
1948  mirror::Throwable* ncdfe = pre_allocated_NoClassDefFoundError_.Read();
1949  if (ncdfe == nullptr) {
1950    LOG(ERROR) << "Failed to return pre-allocated NoClassDefFoundError";
1951  }
1952  return ncdfe;
1953}
1954
1955void Runtime::VisitConstantRoots(RootVisitor* visitor) {
1956  // Visit the classes held as static in mirror classes, these can be visited concurrently and only
1957  // need to be visited once per GC since they never change.
1958  mirror::Class::VisitRoots(visitor);
1959  mirror::Constructor::VisitRoots(visitor);
1960  mirror::Reference::VisitRoots(visitor);
1961  mirror::Method::VisitRoots(visitor);
1962  mirror::StackTraceElement::VisitRoots(visitor);
1963  mirror::String::VisitRoots(visitor);
1964  mirror::Throwable::VisitRoots(visitor);
1965  mirror::Field::VisitRoots(visitor);
1966  mirror::MethodType::VisitRoots(visitor);
1967  mirror::MethodHandleImpl::VisitRoots(visitor);
1968  mirror::MethodHandlesLookup::VisitRoots(visitor);
1969  mirror::EmulatedStackFrame::VisitRoots(visitor);
1970  mirror::ClassExt::VisitRoots(visitor);
1971  mirror::CallSite::VisitRoots(visitor);
1972  mirror::VarHandle::VisitRoots(visitor);
1973  mirror::FieldVarHandle::VisitRoots(visitor);
1974  mirror::ArrayElementVarHandle::VisitRoots(visitor);
1975  mirror::ByteArrayViewVarHandle::VisitRoots(visitor);
1976  mirror::ByteBufferViewVarHandle::VisitRoots(visitor);
1977  // Visit all the primitive array types classes.
1978  mirror::PrimitiveArray<uint8_t>::VisitRoots(visitor);   // BooleanArray
1979  mirror::PrimitiveArray<int8_t>::VisitRoots(visitor);    // ByteArray
1980  mirror::PrimitiveArray<uint16_t>::VisitRoots(visitor);  // CharArray
1981  mirror::PrimitiveArray<double>::VisitRoots(visitor);    // DoubleArray
1982  mirror::PrimitiveArray<float>::VisitRoots(visitor);     // FloatArray
1983  mirror::PrimitiveArray<int32_t>::VisitRoots(visitor);   // IntArray
1984  mirror::PrimitiveArray<int64_t>::VisitRoots(visitor);   // LongArray
1985  mirror::PrimitiveArray<int16_t>::VisitRoots(visitor);   // ShortArray
1986  // Visiting the roots of these ArtMethods is not currently required since all the GcRoots are
1987  // null.
1988  BufferedRootVisitor<16> buffered_visitor(visitor, RootInfo(kRootVMInternal));
1989  const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
1990  if (HasResolutionMethod()) {
1991    resolution_method_->VisitRoots(buffered_visitor, pointer_size);
1992  }
1993  if (HasImtConflictMethod()) {
1994    imt_conflict_method_->VisitRoots(buffered_visitor, pointer_size);
1995  }
1996  if (imt_unimplemented_method_ != nullptr) {
1997    imt_unimplemented_method_->VisitRoots(buffered_visitor, pointer_size);
1998  }
1999  for (uint32_t i = 0; i < kCalleeSaveSize; ++i) {
2000    auto* m = reinterpret_cast<ArtMethod*>(callee_save_methods_[i]);
2001    if (m != nullptr) {
2002      m->VisitRoots(buffered_visitor, pointer_size);
2003    }
2004  }
2005}
2006
2007void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
2008  intern_table_->VisitRoots(visitor, flags);
2009  class_linker_->VisitRoots(visitor, flags);
2010  heap_->VisitAllocationRecords(visitor);
2011  if ((flags & kVisitRootFlagNewRoots) == 0) {
2012    // Guaranteed to have no new roots in the constant roots.
2013    VisitConstantRoots(visitor);
2014  }
2015  Dbg::VisitRoots(visitor);
2016}
2017
2018void Runtime::VisitTransactionRoots(RootVisitor* visitor) {
2019  for (auto& transaction : preinitialization_transactions_) {
2020    transaction->VisitRoots(visitor);
2021  }
2022}
2023
2024void Runtime::VisitNonThreadRoots(RootVisitor* visitor) {
2025  java_vm_->VisitRoots(visitor);
2026  sentinel_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
2027  pre_allocated_OutOfMemoryError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
2028  pre_allocated_NoClassDefFoundError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
2029  verifier::MethodVerifier::VisitStaticRoots(visitor);
2030  VisitTransactionRoots(visitor);
2031}
2032
2033void Runtime::VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
2034  VisitThreadRoots(visitor, flags);
2035  VisitNonThreadRoots(visitor);
2036}
2037
2038void Runtime::VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags) {
2039  thread_list_->VisitRoots(visitor, flags);
2040}
2041
2042void Runtime::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
2043  VisitNonConcurrentRoots(visitor, flags);
2044  VisitConcurrentRoots(visitor, flags);
2045}
2046
2047void Runtime::VisitImageRoots(RootVisitor* visitor) {
2048  for (auto* space : GetHeap()->GetContinuousSpaces()) {
2049    if (space->IsImageSpace()) {
2050      auto* image_space = space->AsImageSpace();
2051      const auto& image_header = image_space->GetImageHeader();
2052      for (int32_t i = 0, size = image_header.GetImageRoots()->GetLength(); i != size; ++i) {
2053        auto* obj = image_header.GetImageRoot(static_cast<ImageHeader::ImageRoot>(i));
2054        if (obj != nullptr) {
2055          auto* after_obj = obj;
2056          visitor->VisitRoot(&after_obj, RootInfo(kRootStickyClass));
2057          CHECK_EQ(after_obj, obj);
2058        }
2059      }
2060    }
2061  }
2062}
2063
2064static ArtMethod* CreateRuntimeMethod(ClassLinker* class_linker, LinearAlloc* linear_alloc) {
2065  const PointerSize image_pointer_size = class_linker->GetImagePointerSize();
2066  const size_t method_alignment = ArtMethod::Alignment(image_pointer_size);
2067  const size_t method_size = ArtMethod::Size(image_pointer_size);
2068  LengthPrefixedArray<ArtMethod>* method_array = class_linker->AllocArtMethodArray(
2069      Thread::Current(),
2070      linear_alloc,
2071      1);
2072  ArtMethod* method = &method_array->At(0, method_size, method_alignment);
2073  CHECK(method != nullptr);
2074  method->SetDexMethodIndex(dex::kDexNoIndex);
2075  CHECK(method->IsRuntimeMethod());
2076  return method;
2077}
2078
2079ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) {
2080  ClassLinker* const class_linker = GetClassLinker();
2081  ArtMethod* method = CreateRuntimeMethod(class_linker, linear_alloc);
2082  // When compiling, the code pointer will get set later when the image is loaded.
2083  const PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
2084  if (IsAotCompiler()) {
2085    method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
2086  } else {
2087    method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
2088  }
2089  // Create empty conflict table.
2090  method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc),
2091                              pointer_size);
2092  return method;
2093}
2094
2095void Runtime::SetImtConflictMethod(ArtMethod* method) {
2096  CHECK(method != nullptr);
2097  CHECK(method->IsRuntimeMethod());
2098  imt_conflict_method_ = method;
2099}
2100
2101ArtMethod* Runtime::CreateResolutionMethod() {
2102  auto* method = CreateRuntimeMethod(GetClassLinker(), GetLinearAlloc());
2103  // When compiling, the code pointer will get set later when the image is loaded.
2104  if (IsAotCompiler()) {
2105    PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
2106    method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
2107  } else {
2108    method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
2109  }
2110  return method;
2111}
2112
2113ArtMethod* Runtime::CreateCalleeSaveMethod() {
2114  auto* method = CreateRuntimeMethod(GetClassLinker(), GetLinearAlloc());
2115  PointerSize pointer_size = GetInstructionSetPointerSize(instruction_set_);
2116  method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
2117  DCHECK_NE(instruction_set_, InstructionSet::kNone);
2118  DCHECK(method->IsRuntimeMethod());
2119  return method;
2120}
2121
2122void Runtime::DisallowNewSystemWeaks() {
2123  CHECK(!kUseReadBarrier);
2124  monitor_list_->DisallowNewMonitors();
2125  intern_table_->ChangeWeakRootState(gc::kWeakRootStateNoReadsOrWrites);
2126  java_vm_->DisallowNewWeakGlobals();
2127  heap_->DisallowNewAllocationRecords();
2128  if (GetJit() != nullptr) {
2129    GetJit()->GetCodeCache()->DisallowInlineCacheAccess();
2130  }
2131
2132  // All other generic system-weak holders.
2133  for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
2134    holder->Disallow();
2135  }
2136}
2137
2138void Runtime::AllowNewSystemWeaks() {
2139  CHECK(!kUseReadBarrier);
2140  monitor_list_->AllowNewMonitors();
2141  intern_table_->ChangeWeakRootState(gc::kWeakRootStateNormal);  // TODO: Do this in the sweeping.
2142  java_vm_->AllowNewWeakGlobals();
2143  heap_->AllowNewAllocationRecords();
2144  if (GetJit() != nullptr) {
2145    GetJit()->GetCodeCache()->AllowInlineCacheAccess();
2146  }
2147
2148  // All other generic system-weak holders.
2149  for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
2150    holder->Allow();
2151  }
2152}
2153
2154void Runtime::BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint) {
2155  // This is used for the read barrier case that uses the thread-local
2156  // Thread::GetWeakRefAccessEnabled() flag and the checkpoint while weak ref access is disabled
2157  // (see ThreadList::RunCheckpoint).
2158  monitor_list_->BroadcastForNewMonitors();
2159  intern_table_->BroadcastForNewInterns();
2160  java_vm_->BroadcastForNewWeakGlobals();
2161  heap_->BroadcastForNewAllocationRecords();
2162  if (GetJit() != nullptr) {
2163    GetJit()->GetCodeCache()->BroadcastForInlineCacheAccess();
2164  }
2165
2166  // All other generic system-weak holders.
2167  for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
2168    holder->Broadcast(broadcast_for_checkpoint);
2169  }
2170}
2171
2172void Runtime::SetInstructionSet(InstructionSet instruction_set) {
2173  instruction_set_ = instruction_set;
2174  if ((instruction_set_ == InstructionSet::kThumb2) || (instruction_set_ == InstructionSet::kArm)) {
2175    for (int i = 0; i != kCalleeSaveSize; ++i) {
2176      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2177      callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
2178    }
2179  } else if (instruction_set_ == InstructionSet::kMips) {
2180    for (int i = 0; i != kCalleeSaveSize; ++i) {
2181      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2182      callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
2183    }
2184  } else if (instruction_set_ == InstructionSet::kMips64) {
2185    for (int i = 0; i != kCalleeSaveSize; ++i) {
2186      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2187      callee_save_method_frame_infos_[i] = mips64::Mips64CalleeSaveMethodFrameInfo(type);
2188    }
2189  } else if (instruction_set_ == InstructionSet::kX86) {
2190    for (int i = 0; i != kCalleeSaveSize; ++i) {
2191      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2192      callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
2193    }
2194  } else if (instruction_set_ == InstructionSet::kX86_64) {
2195    for (int i = 0; i != kCalleeSaveSize; ++i) {
2196      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2197      callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
2198    }
2199  } else if (instruction_set_ == InstructionSet::kArm64) {
2200    for (int i = 0; i != kCalleeSaveSize; ++i) {
2201      CalleeSaveType type = static_cast<CalleeSaveType>(i);
2202      callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
2203    }
2204  } else {
2205    UNIMPLEMENTED(FATAL) << instruction_set_;
2206  }
2207}
2208
2209void Runtime::ClearInstructionSet() {
2210  instruction_set_ = InstructionSet::kNone;
2211}
2212
2213void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
2214  DCHECK_LT(static_cast<uint32_t>(type), kCalleeSaveSize);
2215  CHECK(method != nullptr);
2216  callee_save_methods_[static_cast<size_t>(type)] = reinterpret_cast<uintptr_t>(method);
2217}
2218
2219void Runtime::ClearCalleeSaveMethods() {
2220  for (size_t i = 0; i < kCalleeSaveSize; ++i) {
2221    callee_save_methods_[i] = reinterpret_cast<uintptr_t>(nullptr);
2222  }
2223}
2224
2225void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths,
2226                              const std::string& profile_output_filename) {
2227  if (jit_.get() == nullptr) {
2228    // We are not JITing. Nothing to do.
2229    return;
2230  }
2231
2232  VLOG(profiler) << "Register app with " << profile_output_filename
2233      << " " << android::base::Join(code_paths, ':');
2234
2235  if (profile_output_filename.empty()) {
2236    LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty.";
2237    return;
2238  }
2239  if (!OS::FileExists(profile_output_filename.c_str(), false /*check_file_type*/)) {
2240    LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
2241    return;
2242  }
2243  if (code_paths.empty()) {
2244    LOG(WARNING) << "JIT profile information will not be recorded: code paths is empty.";
2245    return;
2246  }
2247
2248  jit_->StartProfileSaver(profile_output_filename, code_paths);
2249}
2250
2251// Transaction support.
2252bool Runtime::IsActiveTransaction() const {
2253  return !preinitialization_transactions_.empty() && !GetTransaction()->IsRollingBack();
2254}
2255
2256void Runtime::EnterTransactionMode() {
2257  DCHECK(IsAotCompiler());
2258  DCHECK(!IsActiveTransaction());
2259  preinitialization_transactions_.push_back(std::make_unique<Transaction>());
2260}
2261
2262void Runtime::EnterTransactionMode(bool strict, mirror::Class* root) {
2263  DCHECK(IsAotCompiler());
2264  preinitialization_transactions_.push_back(std::make_unique<Transaction>(strict, root));
2265}
2266
2267void Runtime::ExitTransactionMode() {
2268  DCHECK(IsAotCompiler());
2269  DCHECK(IsActiveTransaction());
2270  preinitialization_transactions_.pop_back();
2271}
2272
2273void Runtime::RollbackAndExitTransactionMode() {
2274  DCHECK(IsAotCompiler());
2275  DCHECK(IsActiveTransaction());
2276  preinitialization_transactions_.back()->Rollback();
2277  preinitialization_transactions_.pop_back();
2278}
2279
2280bool Runtime::IsTransactionAborted() const {
2281  if (!IsActiveTransaction()) {
2282    return false;
2283  } else {
2284    DCHECK(IsAotCompiler());
2285    return GetTransaction()->IsAborted();
2286  }
2287}
2288
2289void Runtime::RollbackAllTransactions() {
2290  // If transaction is aborted, all transactions will be kept in the list.
2291  // Rollback and exit all of them.
2292  while (IsActiveTransaction()) {
2293    RollbackAndExitTransactionMode();
2294  }
2295}
2296
2297bool Runtime::IsActiveStrictTransactionMode() const {
2298  return IsActiveTransaction() && GetTransaction()->IsStrict();
2299}
2300
2301const std::unique_ptr<Transaction>& Runtime::GetTransaction() const {
2302  DCHECK(!preinitialization_transactions_.empty());
2303  return preinitialization_transactions_.back();
2304}
2305
2306void Runtime::AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message) {
2307  DCHECK(IsAotCompiler());
2308  DCHECK(IsActiveTransaction());
2309  // Throwing an exception may cause its class initialization. If we mark the transaction
2310  // aborted before that, we may warn with a false alarm. Throwing the exception before
2311  // marking the transaction aborted avoids that.
2312  // But now the transaction can be nested, and abort the transaction will relax the constraints
2313  // for constructing stack trace.
2314  GetTransaction()->Abort(abort_message);
2315  GetTransaction()->ThrowAbortError(self, &abort_message);
2316}
2317
2318void Runtime::ThrowTransactionAbortError(Thread* self) {
2319  DCHECK(IsAotCompiler());
2320  DCHECK(IsActiveTransaction());
2321  // Passing nullptr means we rethrow an exception with the earlier transaction abort message.
2322  GetTransaction()->ThrowAbortError(self, nullptr);
2323}
2324
2325void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
2326                                      uint8_t value, bool is_volatile) const {
2327  DCHECK(IsAotCompiler());
2328  DCHECK(IsActiveTransaction());
2329  GetTransaction()->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile);
2330}
2331
2332void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
2333                                   int8_t value, bool is_volatile) const {
2334  DCHECK(IsAotCompiler());
2335  DCHECK(IsActiveTransaction());
2336  GetTransaction()->RecordWriteFieldByte(obj, field_offset, value, is_volatile);
2337}
2338
2339void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
2340                                   uint16_t value, bool is_volatile) const {
2341  DCHECK(IsAotCompiler());
2342  DCHECK(IsActiveTransaction());
2343  GetTransaction()->RecordWriteFieldChar(obj, field_offset, value, is_volatile);
2344}
2345
2346void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
2347                                    int16_t value, bool is_volatile) const {
2348  DCHECK(IsAotCompiler());
2349  DCHECK(IsActiveTransaction());
2350  GetTransaction()->RecordWriteFieldShort(obj, field_offset, value, is_volatile);
2351}
2352
2353void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset,
2354                                 uint32_t value, bool is_volatile) const {
2355  DCHECK(IsAotCompiler());
2356  DCHECK(IsActiveTransaction());
2357  GetTransaction()->RecordWriteField32(obj, field_offset, value, is_volatile);
2358}
2359
2360void Runtime::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset,
2361                                 uint64_t value, bool is_volatile) const {
2362  DCHECK(IsAotCompiler());
2363  DCHECK(IsActiveTransaction());
2364  GetTransaction()->RecordWriteField64(obj, field_offset, value, is_volatile);
2365}
2366
2367void Runtime::RecordWriteFieldReference(mirror::Object* obj,
2368                                        MemberOffset field_offset,
2369                                        ObjPtr<mirror::Object> value,
2370                                        bool is_volatile) const {
2371  DCHECK(IsAotCompiler());
2372  DCHECK(IsActiveTransaction());
2373  GetTransaction()->RecordWriteFieldReference(obj,
2374                                                            field_offset,
2375                                                            value.Ptr(),
2376                                                            is_volatile);
2377}
2378
2379void Runtime::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const {
2380  DCHECK(IsAotCompiler());
2381  DCHECK(IsActiveTransaction());
2382  GetTransaction()->RecordWriteArray(array, index, value);
2383}
2384
2385void Runtime::RecordStrongStringInsertion(ObjPtr<mirror::String> s) const {
2386  DCHECK(IsAotCompiler());
2387  DCHECK(IsActiveTransaction());
2388  GetTransaction()->RecordStrongStringInsertion(s);
2389}
2390
2391void Runtime::RecordWeakStringInsertion(ObjPtr<mirror::String> s) const {
2392  DCHECK(IsAotCompiler());
2393  DCHECK(IsActiveTransaction());
2394  GetTransaction()->RecordWeakStringInsertion(s);
2395}
2396
2397void Runtime::RecordStrongStringRemoval(ObjPtr<mirror::String> s) const {
2398  DCHECK(IsAotCompiler());
2399  DCHECK(IsActiveTransaction());
2400  GetTransaction()->RecordStrongStringRemoval(s);
2401}
2402
2403void Runtime::RecordWeakStringRemoval(ObjPtr<mirror::String> s) const {
2404  DCHECK(IsAotCompiler());
2405  DCHECK(IsActiveTransaction());
2406  GetTransaction()->RecordWeakStringRemoval(s);
2407}
2408
2409void Runtime::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
2410                                  dex::StringIndex string_idx) const {
2411  DCHECK(IsAotCompiler());
2412  DCHECK(IsActiveTransaction());
2413  GetTransaction()->RecordResolveString(dex_cache, string_idx);
2414}
2415
2416void Runtime::SetFaultMessage(const std::string& message) {
2417  MutexLock mu(Thread::Current(), fault_message_lock_);
2418  fault_message_ = message;
2419}
2420
2421void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
2422    const {
2423  if (GetInstrumentation()->InterpretOnly()) {
2424    argv->push_back("--compiler-filter=quicken");
2425  }
2426
2427  // Make the dex2oat instruction set match that of the launching runtime. If we have multiple
2428  // architecture support, dex2oat may be compiled as a different instruction-set than that
2429  // currently being executed.
2430  std::string instruction_set("--instruction-set=");
2431  instruction_set += GetInstructionSetString(kRuntimeISA);
2432  argv->push_back(instruction_set);
2433
2434  std::unique_ptr<const InstructionSetFeatures> features(InstructionSetFeatures::FromCppDefines());
2435  std::string feature_string("--instruction-set-features=");
2436  feature_string += features->GetFeatureString();
2437  argv->push_back(feature_string);
2438}
2439
2440void Runtime::CreateJit() {
2441  CHECK(!IsAotCompiler());
2442  if (kIsDebugBuild && GetInstrumentation()->IsForcedInterpretOnly()) {
2443    DCHECK(!jit_options_->UseJitCompilation());
2444  }
2445  std::string error_msg;
2446  jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
2447  if (jit_.get() == nullptr) {
2448    LOG(WARNING) << "Failed to create JIT " << error_msg;
2449    return;
2450  }
2451}
2452
2453bool Runtime::CanRelocate() const {
2454  return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
2455}
2456
2457bool Runtime::IsCompilingBootImage() const {
2458  return IsCompiler() && compiler_callbacks_->IsBootImage();
2459}
2460
2461void Runtime::SetResolutionMethod(ArtMethod* method) {
2462  CHECK(method != nullptr);
2463  CHECK(method->IsRuntimeMethod()) << method;
2464  resolution_method_ = method;
2465}
2466
2467void Runtime::SetImtUnimplementedMethod(ArtMethod* method) {
2468  CHECK(method != nullptr);
2469  CHECK(method->IsRuntimeMethod());
2470  imt_unimplemented_method_ = method;
2471}
2472
2473void Runtime::FixupConflictTables() {
2474  // We can only do this after the class linker is created.
2475  const PointerSize pointer_size = GetClassLinker()->GetImagePointerSize();
2476  if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) {
2477    imt_unimplemented_method_->SetImtConflictTable(
2478        ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
2479        pointer_size);
2480  }
2481  if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) {
2482    imt_conflict_method_->SetImtConflictTable(
2483          ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size),
2484          pointer_size);
2485  }
2486}
2487
2488void Runtime::DisableVerifier() {
2489  verify_ = verifier::VerifyMode::kNone;
2490}
2491
2492bool Runtime::IsVerificationEnabled() const {
2493  return verify_ == verifier::VerifyMode::kEnable ||
2494      verify_ == verifier::VerifyMode::kSoftFail;
2495}
2496
2497bool Runtime::IsVerificationSoftFail() const {
2498  return verify_ == verifier::VerifyMode::kSoftFail;
2499}
2500
2501bool Runtime::IsAsyncDeoptimizeable(uintptr_t code) const {
2502  // We only support async deopt (ie the compiled code is not explicitly asking for
2503  // deopt, but something else like the debugger) in debuggable JIT code.
2504  // We could look at the oat file where `code` is being defined,
2505  // and check whether it's been compiled debuggable, but we decided to
2506  // only rely on the JIT for debuggable apps.
2507  return IsJavaDebuggable() &&
2508      GetJit() != nullptr &&
2509      GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(code));
2510}
2511
2512LinearAlloc* Runtime::CreateLinearAlloc() {
2513  // For 64 bit compilers, it needs to be in low 4GB in the case where we are cross compiling for a
2514  // 32 bit target. In this case, we have 32 bit pointers in the dex cache arrays which can't hold
2515  // when we have 64 bit ArtMethod pointers.
2516  return (IsAotCompiler() && Is64BitInstructionSet(kRuntimeISA))
2517      ? new LinearAlloc(low_4gb_arena_pool_.get())
2518      : new LinearAlloc(arena_pool_.get());
2519}
2520
2521double Runtime::GetHashTableMinLoadFactor() const {
2522  return is_low_memory_mode_ ? kLowMemoryMinLoadFactor : kNormalMinLoadFactor;
2523}
2524
2525double Runtime::GetHashTableMaxLoadFactor() const {
2526  return is_low_memory_mode_ ? kLowMemoryMaxLoadFactor : kNormalMaxLoadFactor;
2527}
2528
2529void Runtime::UpdateProcessState(ProcessState process_state) {
2530  ProcessState old_process_state = process_state_;
2531  process_state_ = process_state;
2532  GetHeap()->UpdateProcessState(old_process_state, process_state);
2533}
2534
2535void Runtime::RegisterSensitiveThread() const {
2536  Thread::SetJitSensitiveThread();
2537}
2538
2539// Returns true if JIT compilations are enabled. GetJit() will be not null in this case.
2540bool Runtime::UseJitCompilation() const {
2541  return (jit_ != nullptr) && jit_->UseJitCompilation();
2542}
2543
2544void Runtime::EnvSnapshot::TakeSnapshot() {
2545  char** env = GetEnviron();
2546  for (size_t i = 0; env[i] != nullptr; ++i) {
2547    name_value_pairs_.emplace_back(new std::string(env[i]));
2548  }
2549  // The strings in name_value_pairs_ retain ownership of the c_str, but we assign pointers
2550  // for quick use by GetSnapshot.  This avoids allocation and copying cost at Exec.
2551  c_env_vector_.reset(new char*[name_value_pairs_.size() + 1]);
2552  for (size_t i = 0; env[i] != nullptr; ++i) {
2553    c_env_vector_[i] = const_cast<char*>(name_value_pairs_[i]->c_str());
2554  }
2555  c_env_vector_[name_value_pairs_.size()] = nullptr;
2556}
2557
2558char** Runtime::EnvSnapshot::GetSnapshot() const {
2559  return c_env_vector_.get();
2560}
2561
2562void Runtime::AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder) {
2563  gc::ScopedGCCriticalSection gcs(Thread::Current(),
2564                                  gc::kGcCauseAddRemoveSystemWeakHolder,
2565                                  gc::kCollectorTypeAddRemoveSystemWeakHolder);
2566  // Note: The ScopedGCCriticalSection also ensures that the rest of the function is in
2567  //       a critical section.
2568  system_weak_holders_.push_back(holder);
2569}
2570
2571void Runtime::RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder) {
2572  gc::ScopedGCCriticalSection gcs(Thread::Current(),
2573                                  gc::kGcCauseAddRemoveSystemWeakHolder,
2574                                  gc::kCollectorTypeAddRemoveSystemWeakHolder);
2575  auto it = std::find(system_weak_holders_.begin(), system_weak_holders_.end(), holder);
2576  if (it != system_weak_holders_.end()) {
2577    system_weak_holders_.erase(it);
2578  }
2579}
2580
2581RuntimeCallbacks* Runtime::GetRuntimeCallbacks() {
2582  return callbacks_.get();
2583}
2584
2585// Used to patch boot image method entry point to interpreter bridge.
2586class UpdateEntryPointsClassVisitor : public ClassVisitor {
2587 public:
2588  explicit UpdateEntryPointsClassVisitor(instrumentation::Instrumentation* instrumentation)
2589      : instrumentation_(instrumentation) {}
2590
2591  bool operator()(ObjPtr<mirror::Class> klass) OVERRIDE REQUIRES(Locks::mutator_lock_) {
2592    auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
2593    for (auto& m : klass->GetMethods(pointer_size)) {
2594      const void* code = m.GetEntryPointFromQuickCompiledCode();
2595      if (Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
2596          !m.IsNative() &&
2597          !m.IsProxyMethod()) {
2598        instrumentation_->UpdateMethodsCodeForJavaDebuggable(&m, GetQuickToInterpreterBridge());
2599      }
2600    }
2601    return true;
2602  }
2603
2604 private:
2605  instrumentation::Instrumentation* const instrumentation_;
2606};
2607
2608void Runtime::SetJavaDebuggable(bool value) {
2609  is_java_debuggable_ = value;
2610  // Do not call DeoptimizeBootImage just yet, the runtime may still be starting up.
2611}
2612
2613void Runtime::DeoptimizeBootImage() {
2614  // If we've already started and we are setting this runtime to debuggable,
2615  // we patch entry points of methods in boot image to interpreter bridge, as
2616  // boot image code may be AOT compiled as not debuggable.
2617  if (!GetInstrumentation()->IsForcedInterpretOnly()) {
2618    ScopedObjectAccess soa(Thread::Current());
2619    UpdateEntryPointsClassVisitor visitor(GetInstrumentation());
2620    GetClassLinker()->VisitClasses(&visitor);
2621  }
2622}
2623}  // namespace art
2624