runtime.cc revision 3d21bdf8894e780d349c481e5c9e29fe1556051c
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "runtime.h"
18
19// sys/mount.h has to come before linux/fs.h due to redefinition of MS_RDONLY, MS_BIND, etc
20#include <sys/mount.h>
21#ifdef __linux__
22#include <linux/fs.h>
23#endif
24
25#include <signal.h>
26#include <sys/syscall.h>
27#include <valgrind.h>
28
29#include <cstdio>
30#include <cstdlib>
31#include <limits>
32#include <memory>
33#include <vector>
34#include <fcntl.h>
35
36#include "JniConstants.h"
37#include "ScopedLocalRef.h"
38#include "arch/arm/quick_method_frame_info_arm.h"
39#include "arch/arm/registers_arm.h"
40#include "arch/arm64/quick_method_frame_info_arm64.h"
41#include "arch/arm64/registers_arm64.h"
42#include "arch/instruction_set_features.h"
43#include "arch/mips/quick_method_frame_info_mips.h"
44#include "arch/mips/registers_mips.h"
45#include "arch/mips64/quick_method_frame_info_mips64.h"
46#include "arch/mips64/registers_mips64.h"
47#include "arch/x86/quick_method_frame_info_x86.h"
48#include "arch/x86/registers_x86.h"
49#include "arch/x86_64/quick_method_frame_info_x86_64.h"
50#include "arch/x86_64/registers_x86_64.h"
51#include "art_field-inl.h"
52#include "art_method-inl.h"
53#include "asm_support.h"
54#include "atomic.h"
55#include "base/arena_allocator.h"
56#include "base/dumpable.h"
57#include "base/unix_file/fd_file.h"
58#include "class_linker-inl.h"
59#include "compiler_callbacks.h"
60#include "debugger.h"
61#include "elf_file.h"
62#include "entrypoints/runtime_asm_entrypoints.h"
63#include "fault_handler.h"
64#include "gc/accounting/card_table-inl.h"
65#include "gc/heap.h"
66#include "gc/space/image_space.h"
67#include "gc/space/space-inl.h"
68#include "handle_scope-inl.h"
69#include "image.h"
70#include "instrumentation.h"
71#include "intern_table.h"
72#include "interpreter/interpreter.h"
73#include "jit/jit.h"
74#include "jni_internal.h"
75#include "linear_alloc.h"
76#include "mirror/array.h"
77#include "mirror/class-inl.h"
78#include "mirror/class_loader.h"
79#include "mirror/field.h"
80#include "mirror/method.h"
81#include "mirror/stack_trace_element.h"
82#include "mirror/throwable.h"
83#include "monitor.h"
84#include "native/dalvik_system_DexFile.h"
85#include "native/dalvik_system_VMDebug.h"
86#include "native/dalvik_system_VMRuntime.h"
87#include "native/dalvik_system_VMStack.h"
88#include "native/dalvik_system_ZygoteHooks.h"
89#include "native/java_lang_Class.h"
90#include "native/java_lang_DexCache.h"
91#include "native/java_lang_Object.h"
92#include "native/java_lang_Runtime.h"
93#include "native/java_lang_String.h"
94#include "native/java_lang_StringFactory.h"
95#include "native/java_lang_System.h"
96#include "native/java_lang_Thread.h"
97#include "native/java_lang_Throwable.h"
98#include "native/java_lang_VMClassLoader.h"
99#include "native/java_lang_ref_FinalizerReference.h"
100#include "native/java_lang_ref_Reference.h"
101#include "native/java_lang_reflect_Array.h"
102#include "native/java_lang_reflect_Constructor.h"
103#include "native/java_lang_reflect_Field.h"
104#include "native/java_lang_reflect_Method.h"
105#include "native/java_lang_reflect_Proxy.h"
106#include "native/java_util_concurrent_atomic_AtomicLong.h"
107#include "native/libcore_util_CharsetUtils.h"
108#include "native/org_apache_harmony_dalvik_ddmc_DdmServer.h"
109#include "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
110#include "native/sun_misc_Unsafe.h"
111#include "native_bridge_art_interface.h"
112#include "oat_file.h"
113#include "os.h"
114#include "parsed_options.h"
115#include "profiler.h"
116#include "quick/quick_method_frame_info.h"
117#include "reflection.h"
118#include "runtime_options.h"
119#include "ScopedLocalRef.h"
120#include "scoped_thread_state_change.h"
121#include "sigchain.h"
122#include "signal_catcher.h"
123#include "signal_set.h"
124#include "thread.h"
125#include "thread_list.h"
126#include "trace.h"
127#include "transaction.h"
128#include "verifier/method_verifier.h"
129#include "well_known_classes.h"
130
131namespace art {
132
133// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
134static constexpr bool kEnableJavaStackTraceHandler = false;
135Runtime* Runtime::instance_ = nullptr;
136
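// Method tracing configuration captured from the runtime options during Init() and consumed by
// Runtime::Start() when it kicks off tracing.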
137struct TraceConfig {
138  Trace::TraceMode trace_mode;
139  Trace::TraceOutputMode trace_output_mode;
140  std::string trace_file;
141  size_t trace_file_size;
142};
143
144Runtime::Runtime()
145    : instruction_set_(kNone),
146      compiler_callbacks_(nullptr),
147      is_zygote_(false),
148      must_relocate_(false),
149      is_concurrent_gc_enabled_(true),
150      is_explicit_gc_disabled_(false),
151      dex2oat_enabled_(true),
152      image_dex2oat_enabled_(true),
153      default_stack_size_(0),
154      heap_(nullptr),
155      max_spins_before_thin_lock_inflation_(Monitor::kDefaultMaxSpinsBeforeThinLockInflation),
156      monitor_list_(nullptr),
157      monitor_pool_(nullptr),
158      thread_list_(nullptr),
159      intern_table_(nullptr),
160      class_linker_(nullptr),
161      signal_catcher_(nullptr),
162      java_vm_(nullptr),
163      fault_message_lock_("Fault message lock"),
164      fault_message_(""),
165      threads_being_born_(0),
166      shutdown_cond_(new ConditionVariable("Runtime shutdown", *Locks::runtime_shutdown_lock_)),
167      shutting_down_(false),
168      shutting_down_started_(false),
169      started_(false),
170      finished_starting_(false),
171      vfprintf_(nullptr),
172      exit_(nullptr),
173      abort_(nullptr),
174      stats_enabled_(false),
175      running_on_valgrind_(RUNNING_ON_VALGRIND > 0),
176      profiler_started_(false),
177      instrumentation_(),
178      main_thread_group_(nullptr),
179      system_thread_group_(nullptr),
180      system_class_loader_(nullptr),
181      dump_gc_performance_on_shutdown_(false),
182      preinitialization_transaction_(nullptr),
183      verify_(false),
184      allow_dex_file_fallback_(true),
185      target_sdk_version_(0),
186      implicit_null_checks_(false),
187      implicit_so_checks_(false),
188      implicit_suspend_checks_(false),
189      is_native_bridge_loaded_(false),
190      zygote_max_failed_boots_(0) {
191  CheckAsmSupportOffsetsAndSizes();
192  std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
193}
194
195Runtime::~Runtime() {
196  if (is_native_bridge_loaded_) {
197    UnloadNativeBridge();
198  }
199  if (dump_gc_performance_on_shutdown_) {
200    // This can't be called from the Heap destructor below because it
201    // could call RosAlloc::InspectAll() which needs the thread_list
202    // to be still alive.
203    heap_->DumpGcPerformanceInfo(LOG(INFO));
204  }
205
206  Thread* self = Thread::Current();
207  const bool attach_shutdown_thread = self == nullptr;
208  if (attach_shutdown_thread) {
209    CHECK(AttachCurrentThread("Shutdown thread", false, nullptr, false));
210    self = Thread::Current();
211  } else {
212    LOG(WARNING) << "Current thread not detached in Runtime shutdown";
213  }
214
215  {
216    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
217    shutting_down_started_ = true;
218    while (threads_being_born_ > 0) {
219      shutdown_cond_->Wait(self);
220    }
221    shutting_down_ = true;
222  }
223  // Shutdown and wait for the daemons.
224  CHECK(self != nullptr);
225  if (IsFinishedStarting()) {
226    self->ClearException();
227    self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
228                                            WellKnownClasses::java_lang_Daemons_stop);
229  }
230  if (attach_shutdown_thread) {
231    DetachCurrentThread();
232    self = nullptr;
233  }
234
235  // Shut down background profiler before the runtime exits.
236  if (profiler_started_) {
237    BackgroundMethodSamplingProfiler::Shutdown();
238  }
239
240  Trace::Shutdown();
241
242  // Make sure to let the GC complete if it is running.
243  heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
244  heap_->DeleteThreadPool();
245  if (jit_.get() != nullptr) {
246    VLOG(jit) << "Deleting jit thread pool";
247    // Delete thread pool before the thread list since we don't want to wait forever on the
248    // JIT compiler threads.
249    jit_->DeleteThreadPool();
250  }
251
252  // Make sure our internal threads are dead before we start tearing down things they're using.
253  Dbg::StopJdwp();
254  delete signal_catcher_;
255
256  // Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
257  delete thread_list_;
258
259  // Delete the JIT after the thread list to ensure that there are no remaining threads which could be
260  // accessing the instrumentation when we delete it.
261  if (jit_.get() != nullptr) {
262    VLOG(jit) << "Deleting jit";
263    jit_.reset(nullptr);
264  }
265  linear_alloc_.reset();
266  arena_pool_.reset();
267  low_4gb_arena_pool_.reset();
268
269  // Shutdown the fault manager if it was initialized.
270  fault_manager.Shutdown();
271
272  delete monitor_list_;
273  delete monitor_pool_;
274  delete class_linker_;
275  delete heap_;
276  delete intern_table_;
277  delete java_vm_;
278  Thread::Shutdown();
279  QuasiAtomic::Shutdown();
280  verifier::MethodVerifier::Shutdown();
281  MemMap::Shutdown();
282  // TODO: acquire a static mutex on Runtime to avoid racing.
283  CHECK(instance_ == nullptr || instance_ == this);
284  instance_ = nullptr;
285}
286
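// Helper used by Runtime::Abort() to dump as much detail about the aborting thread, and about all
// other threads, as can be gathered safely at abort time.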
287struct AbortState {
288  void Dump(std::ostream& os) const {
289    if (gAborting > 1) {
290      os << "Runtime aborting --- recursively, so no thread-specific detail!\n";
291      return;
292    }
293    gAborting++;
294    os << "Runtime aborting...\n";
295    if (Runtime::Current() == nullptr) {
296      os << "(Runtime does not yet exist!)\n";
297      return;
298    }
299    Thread* self = Thread::Current();
300    if (self == nullptr) {
301      os << "(Aborting thread was not attached to runtime!)\n";
302      DumpKernelStack(os, GetTid(), "  kernel: ", false);
303      DumpNativeStack(os, GetTid(), "  native: ", nullptr);
304    } else {
305      os << "Aborting thread:\n";
306      if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
307        DumpThread(os, self);
308      } else {
309        if (Locks::mutator_lock_->SharedTryLock(self)) {
310          DumpThread(os, self);
311          Locks::mutator_lock_->SharedUnlock(self);
312        }
313      }
314    }
315    DumpAllThreads(os, self);
316  }
317
318  // No thread-safety analysis as we do explicitly test for holding the mutator lock.
319  void DumpThread(std::ostream& os, Thread* self) const NO_THREAD_SAFETY_ANALYSIS {
320    DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self));
321    self->Dump(os);
322    if (self->IsExceptionPending()) {
323      mirror::Throwable* exception = self->GetException();
324      os << "Pending exception " << exception->Dump();
325    }
326  }
327
328  void DumpAllThreads(std::ostream& os, Thread* self) const {
329    Runtime* runtime = Runtime::Current();
330    if (runtime != nullptr) {
331      ThreadList* thread_list = runtime->GetThreadList();
332      if (thread_list != nullptr) {
333        bool tll_already_held = Locks::thread_list_lock_->IsExclusiveHeld(self);
334        bool ml_already_held = Locks::mutator_lock_->IsSharedHeld(self);
335        if (!tll_already_held || !ml_already_held) {
336          os << "Dumping all threads without appropriate locks held:"
337              << (!tll_already_held ? " thread list lock" : "")
338              << (!ml_already_held ? " mutator lock" : "")
339              << "\n";
340        }
341        os << "All threads:\n";
342        thread_list->Dump(os);
343      }
344    }
345  }
346};
347
348void Runtime::Abort() {
349  gAborting++;  // set before taking any locks
350
351  // Ensure that we don't have multiple threads trying to abort at once,
352  // which would result in significantly worse diagnostics.
353  MutexLock mu(Thread::Current(), *Locks::abort_lock_);
354
355  // Get any pending output out of the way.
356  fflush(nullptr);
357
358  // Many people have difficulty distinguishing aborts from crashes,
359  // so be explicit.
360  AbortState state;
361  LOG(INTERNAL_FATAL) << Dumpable<AbortState>(state);
362
363  // Call the abort hook if we have one.
364  if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
365    LOG(INTERNAL_FATAL) << "Calling abort hook...";
366    Runtime::Current()->abort_();
367    // notreached
368    LOG(INTERNAL_FATAL) << "Unexpectedly returned from abort hook!";
369  }
370
371#if defined(__GLIBC__)
372  // TODO: we ought to be able to use pthread_kill(3) here (or abort(3),
373  // which POSIX defines in terms of raise(3), which POSIX defines in terms
374  // of pthread_kill(3)). On Linux, though, libcorkscrew can't unwind through
375  // libpthread, which means the stacks we dump would be useless. Calling
376  // tgkill(2) directly avoids that.
377  syscall(__NR_tgkill, getpid(), GetTid(), SIGABRT);
378  // TODO: LLVM installs its own SIGABRT handler, so exit to be safe... Can we disable that in LLVM?
379  // If not, we could use sigaction(3) before calling tgkill(2) and lose this call to exit(3).
380  exit(1);
381#else
382  abort();
383#endif
384  // notreached
385}
386
387void Runtime::PreZygoteFork() {
388  heap_->PreZygoteFork();
389}
390
391void Runtime::CallExitHook(jint status) {
392  if (exit_ != nullptr) {
393    ScopedThreadStateChange tsc(Thread::Current(), kNative);
394    exit_(status);
395    LOG(WARNING) << "Exit hook returned instead of exiting!";
396  }
397}
398
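// Sweeps the runtime-wide system weak tables (interned strings, monitors, JNI weak globals) with
// the GC-provided is-marked callback.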
399void Runtime::SweepSystemWeaks(IsMarkedCallback* visitor, void* arg) {
400  GetInternTable()->SweepInternTableWeaks(visitor, arg);
401  GetMonitorList()->SweepMonitorList(visitor, arg);
402  GetJavaVM()->SweepJniWeakGlobals(visitor, arg);
403}
404
405bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
406  // TODO: acquire a static mutex on Runtime to avoid racing.
407  if (Runtime::instance_ != nullptr) {
408    return false;
409  }
410  InitLogging(nullptr);  // Calls Locks::Init() as a side effect.
411  instance_ = new Runtime;
412  if (!instance_->Init(options, ignore_unrecognized)) {
413    // TODO: Currently deleting the instance will abort the runtime on destruction. For now this
414    // will leak memory instead. Fix the destructor. b/19100793.
415    // delete instance_;
416    instance_ = nullptr;
417    return false;
418  }
419  return true;
420}
421
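// Invokes ClassLoader.getSystemClassLoader(), installs the result as the main thread's context
// class loader, and returns a new global reference to it.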
422static jobject CreateSystemClassLoader(Runtime* runtime) {
423  if (runtime->IsAotCompiler() && !runtime->GetCompilerCallbacks()->IsBootImage()) {
424    return nullptr;
425  }
426
427  ScopedObjectAccess soa(Thread::Current());
428  ClassLinker* cl = Runtime::Current()->GetClassLinker();
429  auto pointer_size = cl->GetImagePointerSize();
430
431  StackHandleScope<2> hs(soa.Self());
432  Handle<mirror::Class> class_loader_class(
433      hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
434  CHECK(cl->EnsureInitialized(soa.Self(), class_loader_class, true, true));
435
436  ArtMethod* getSystemClassLoader = class_loader_class->FindDirectMethod(
437      "getSystemClassLoader", "()Ljava/lang/ClassLoader;", pointer_size);
438  CHECK(getSystemClassLoader != nullptr);
439
440  JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
441  JNIEnv* env = soa.Self()->GetJniEnv();
442  ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL()));
443  CHECK(system_class_loader.get() != nullptr);
444
445  soa.Self()->SetClassLoaderOverride(system_class_loader.get());
446
447  Handle<mirror::Class> thread_class(
448      hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
449  CHECK(cl->EnsureInitialized(soa.Self(), thread_class, true, true));
450
451  ArtField* contextClassLoader =
452      thread_class->FindDeclaredInstanceField("contextClassLoader", "Ljava/lang/ClassLoader;");
453  CHECK(contextClassLoader != nullptr);
454
455  // We can't run in a transaction yet.
456  contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
457                                       soa.Decode<mirror::ClassLoader*>(system_class_loader.get()));
458
459  return env->NewGlobalRef(system_class_loader.get());
460}
461
462std::string Runtime::GetPatchoatExecutable() const {
463  if (!patchoat_executable_.empty()) {
464    return patchoat_executable_;
465  }
466  std::string patchoat_executable(GetAndroidRoot());
467  patchoat_executable += (kIsDebugBuild ? "/bin/patchoatd" : "/bin/patchoat");
468  return patchoat_executable;
469}
470
471std::string Runtime::GetCompilerExecutable() const {
472  if (!compiler_executable_.empty()) {
473    return compiler_executable_;
474  }
475  std::string compiler_executable(GetAndroidRoot());
476  compiler_executable += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
477  return compiler_executable;
478}
479
480bool Runtime::Start() {
481  VLOG(startup) << "Runtime::Start entering";
482
483  // Restore main thread state to kNative as expected by native code.
484  Thread* self = Thread::Current();
485
486  self->TransitionFromRunnableToSuspended(kNative);
487
488  started_ = true;
489
490  // Use !IsAotCompiler so that we get test coverage; tests are never the zygote.
491  if (!IsAotCompiler()) {
492    ScopedObjectAccess soa(self);
493    gc::space::ImageSpace* image_space = heap_->GetImageSpace();
494    if (image_space != nullptr) {
495      GetInternTable()->AddImageStringsToTable(image_space);
496      GetClassLinker()->MoveImageClassesToClassTable();
497    }
498  }
499
500  // If we are the zygote then we need to wait until after forking to create the code cache
501  // due to SELinux restrictions on r/w/x memory regions.
502  if (!IsZygote() && jit_options_->UseJIT()) {
503    CreateJit();
504  }
505
506  if (!IsImageDex2OatEnabled() || !GetHeap()->HasImageSpace()) {
507    ScopedObjectAccess soa(self);
508    StackHandleScope<1> hs(soa.Self());
509    auto klass(hs.NewHandle<mirror::Class>(mirror::Class::GetJavaLangClass()));
510    class_linker_->EnsureInitialized(soa.Self(), klass, true, true);
511  }
512
513  // InitNativeMethods needs to be after started_ so that the classes
514  // it touches will have methods linked to the oat file if necessary.
515  InitNativeMethods();
516
517  // Initialize well known thread group values that may be accessed by threads while attaching.
518  InitThreadGroups(self);
519
520  Thread::FinishStartup();
521
522  system_class_loader_ = CreateSystemClassLoader(this);
523
524  if (is_zygote_) {
525    if (!InitZygote()) {
526      return false;
527    }
528  } else {
529    if (is_native_bridge_loaded_) {
530      PreInitializeNativeBridge(".");
531    }
532    DidForkFromZygote(self->GetJniEnv(), NativeBridgeAction::kInitialize,
533                      GetInstructionSetString(kRuntimeISA));
534  }
535
536  StartDaemonThreads();
537
538  {
539    ScopedObjectAccess soa(self);
540    self->GetJniEnv()->locals.AssertEmpty();
541  }
542
543  VLOG(startup) << "Runtime::Start exiting";
544  finished_starting_ = true;
545
546  if (profiler_options_.IsEnabled() && !profile_output_filename_.empty()) {
547    // User has asked for a profile using -Xenable-profiler.
548    // Create the profile file if it doesn't exist.
549    int fd = open(profile_output_filename_.c_str(), O_RDWR|O_CREAT|O_EXCL, 0660);
550    if (fd >= 0) {
551      close(fd);
552    } else if (errno != EEXIST) {
553      LOG(INFO) << "Failed to access the profile file. Profiler disabled.";
554      return true;
555    }
556    StartProfiler(profile_output_filename_.c_str());
557  }
558
559  if (trace_config_.get() != nullptr && trace_config_->trace_file != "") {
560    ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart);
561    Trace::Start(trace_config_->trace_file.c_str(),
562                 -1,
563                 static_cast<int>(trace_config_->trace_file_size),
564                 0,
565                 trace_config_->trace_output_mode,
566                 trace_config_->trace_mode,
567                 0);
568  }
569
570  return true;
571}
572
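// Called with the runtime shutdown lock held; signals the shutdown thread once the last in-flight
// thread birth completes while shutdown is in progress.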
573void Runtime::EndThreadBirth() EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_) {
574  DCHECK_GT(threads_being_born_, 0U);
575  threads_being_born_--;
576  if (shutting_down_started_ && threads_being_born_ == 0) {
577    shutdown_cond_->Broadcast(Thread::Current());
578  }
579}
580
581// Do zygote-mode-only initialization.
582bool Runtime::InitZygote() {
583#ifdef __linux__
584  // zygote goes into its own process group
585  setpgid(0, 0);
586
587  // See storage config details at http://source.android.com/tech/storage/
588  // Create private mount namespace shared by all children
589  if (unshare(CLONE_NEWNS) == -1) {
590    PLOG(WARNING) << "Failed to unshare()";
591    return false;
592  }
593
594  // Mark rootfs as being a slave so that changes from default
595  // namespace only flow into our children.
596  if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) {
597    PLOG(WARNING) << "Failed to mount() rootfs as MS_SLAVE";
598    return false;
599  }
600
601  // Create a staging tmpfs that is shared by our children; they will
602  // bind mount storage into their respective private namespaces, which
603  // are isolated from each other.
604  const char* target_base = getenv("EMULATED_STORAGE_TARGET");
605  if (target_base != nullptr) {
606    if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV,
607              "uid=0,gid=1028,mode=0751") == -1) {
608      LOG(WARNING) << "Failed to mount tmpfs to " << target_base;
609      return false;
610    }
611  }
612
613  return true;
614#else
615  UNIMPLEMENTED(FATAL);
616  return false;
617#endif
618}
619
620void Runtime::DidForkFromZygote(JNIEnv* env, NativeBridgeAction action, const char* isa) {
621  is_zygote_ = false;
622
623  if (is_native_bridge_loaded_) {
624    switch (action) {
625      case NativeBridgeAction::kUnload:
626        UnloadNativeBridge();
627        is_native_bridge_loaded_ = false;
628        break;
629
630      case NativeBridgeAction::kInitialize:
631        InitializeNativeBridge(env, isa);
632        break;
633    }
634  }
635
636  // Create the thread pools.
637  heap_->CreateThreadPool();
638  if (jit_.get() == nullptr && jit_options_->UseJIT()) {
639    // Create the JIT if the flag is set and we haven't already created it (happens for run-tests).
640    CreateJit();
641  }
642
643  StartSignalCatcher();
644
645  // Start the JDWP thread. If the command-line debugger flags specified "suspend=y",
646  // this will pause the runtime, so we probably want this to come last.
647  Dbg::StartJdwp();
648}
649
650void Runtime::StartSignalCatcher() {
651  if (!is_zygote_) {
652    signal_catcher_ = new SignalCatcher(stack_trace_file_);
653  }
654}
655
656bool Runtime::IsShuttingDown(Thread* self) {
657  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
658  return IsShuttingDownLocked();
659}
660
661void Runtime::StartDaemonThreads() {
662  VLOG(startup) << "Runtime::StartDaemonThreads entering";
663
664  Thread* self = Thread::Current();
665
666  // Must be in the kNative state for calling native methods.
667  CHECK_EQ(self->GetState(), kNative);
668
669  JNIEnv* env = self->GetJniEnv();
670  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
671                            WellKnownClasses::java_lang_Daemons_start);
672  if (env->ExceptionCheck()) {
673    env->ExceptionDescribe();
674    LOG(FATAL) << "Error starting java.lang.Daemons";
675  }
676
677  VLOG(startup) << "Runtime::StartDaemonThreads exiting";
678}
679
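// Tries to open the boot dex files through the oat file referenced by the system boot image and
// registers that oat file with the class linker; returns false if the image or its oat file
// cannot be used.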
680static bool OpenDexFilesFromImage(const std::string& image_location,
681                                  std::vector<std::unique_ptr<const DexFile>>* dex_files,
682                                  size_t* failures) {
683  DCHECK(dex_files != nullptr) << "OpenDexFilesFromImage: out-param is nullptr";
684  std::string system_filename;
685  bool has_system = false;
686  std::string cache_filename_unused;
687  bool dalvik_cache_exists_unused;
688  bool has_cache_unused;
689  bool is_global_cache_unused;
690  bool found_image = gc::space::ImageSpace::FindImageFilename(image_location.c_str(),
691                                                              kRuntimeISA,
692                                                              &system_filename,
693                                                              &has_system,
694                                                              &cache_filename_unused,
695                                                              &dalvik_cache_exists_unused,
696                                                              &has_cache_unused,
697                                                              &is_global_cache_unused);
698  *failures = 0;
699  if (!found_image || !has_system) {
700    return false;
701  }
702  std::string error_msg;
703  // We are falling back to non-executable use of the oat file because patching failed, presumably
704  // due to lack of space.
705  std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(system_filename.c_str());
706  std::string oat_location = ImageHeader::GetOatLocationFromImageLocation(image_location.c_str());
707  std::unique_ptr<File> file(OS::OpenFileForReading(oat_filename.c_str()));
708  if (file.get() == nullptr) {
709    return false;
710  }
711  std::unique_ptr<ElfFile> elf_file(ElfFile::Open(file.release(), false, false, &error_msg));
712  if (elf_file.get() == nullptr) {
713    return false;
714  }
715  std::unique_ptr<OatFile> oat_file(OatFile::OpenWithElfFile(elf_file.release(), oat_location,
716                                                             nullptr, &error_msg));
717  if (oat_file.get() == nullptr) {
718    LOG(INFO) << "Unable to use '" << oat_filename << "' because " << error_msg;
719    return false;
720  }
721
722  for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) {
723    if (oat_dex_file == nullptr) {
724      *failures += 1;
725      continue;
726    }
727    std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
728    if (dex_file.get() == nullptr) {
729      *failures += 1;
730    } else {
731      dex_files->push_back(std::move(dex_file));
732    }
733  }
734  Runtime::Current()->GetClassLinker()->RegisterOatFile(oat_file.release());
735  return true;
736}
737
738
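// Opens the given dex files, preferring the dex files embedded in the image's oat file when an
// image location is supplied; returns the number of files that failed to open.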
739static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
740                           const std::vector<std::string>& dex_locations,
741                           const std::string& image_location,
742                           std::vector<std::unique_ptr<const DexFile>>* dex_files) {
743  DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
744  size_t failure_count = 0;
745  if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
746    return failure_count;
747  }
748  failure_count = 0;
749  for (size_t i = 0; i < dex_filenames.size(); i++) {
750    const char* dex_filename = dex_filenames[i].c_str();
751    const char* dex_location = dex_locations[i].c_str();
752    std::string error_msg;
753    if (!OS::FileExists(dex_filename)) {
754      LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
755      continue;
756    }
757    if (!DexFile::Open(dex_filename, dex_location, &error_msg, dex_files)) {
758      LOG(WARNING) << "Failed to open .dex from file '" << dex_filename << "': " << error_msg;
759      ++failure_count;
760    }
761  }
762  return failure_count;
763}
764
765bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized) {
766  CHECK_EQ(sysconf(_SC_PAGE_SIZE), kPageSize);
767
768  MemMap::Init();
769
770  using Opt = RuntimeArgumentMap;
771  RuntimeArgumentMap runtime_options;
772  std::unique_ptr<ParsedOptions> parsed_options(
773      ParsedOptions::Create(raw_options, ignore_unrecognized, &runtime_options));
774  if (parsed_options.get() == nullptr) {
775    LOG(ERROR) << "Failed to parse options";
776    return false;
777  }
778  VLOG(startup) << "Runtime::Init -verbose:startup enabled";
779
780  QuasiAtomic::Startup();
781
782  Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
783                runtime_options.GetOrDefault(Opt::HookIsSensitiveThread));
784
785  boot_class_path_string_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
786  class_path_string_ = runtime_options.ReleaseOrDefault(Opt::ClassPath);
787  properties_ = runtime_options.ReleaseOrDefault(Opt::PropertiesList);
788
789  compiler_callbacks_ = runtime_options.GetOrDefault(Opt::CompilerCallbacksPtr);
790  patchoat_executable_ = runtime_options.ReleaseOrDefault(Opt::PatchOat);
791  must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
792  is_zygote_ = runtime_options.Exists(Opt::Zygote);
793  is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
794  dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::Dex2Oat);
795  image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
796
797  vfprintf_ = runtime_options.GetOrDefault(Opt::HookVfprintf);
798  exit_ = runtime_options.GetOrDefault(Opt::HookExit);
799  abort_ = runtime_options.GetOrDefault(Opt::HookAbort);
800
801  default_stack_size_ = runtime_options.GetOrDefault(Opt::StackSize);
802  stack_trace_file_ = runtime_options.ReleaseOrDefault(Opt::StackTraceFile);
803
804  compiler_executable_ = runtime_options.ReleaseOrDefault(Opt::Compiler);
805  compiler_options_ = runtime_options.ReleaseOrDefault(Opt::CompilerOptions);
806  image_compiler_options_ = runtime_options.ReleaseOrDefault(Opt::ImageCompilerOptions);
807  image_location_ = runtime_options.GetOrDefault(Opt::Image);
808
809  max_spins_before_thin_lock_inflation_ =
810      runtime_options.GetOrDefault(Opt::MaxSpinsBeforeThinLockInflation);
811
812  monitor_list_ = new MonitorList;
813  monitor_pool_ = MonitorPool::Create();
814  thread_list_ = new ThreadList;
815  intern_table_ = new InternTable;
816
817  verify_ = runtime_options.GetOrDefault(Opt::Verify);
818  allow_dex_file_fallback_ = !runtime_options.Exists(Opt::NoDexFileFallback);
819
820  Split(runtime_options.GetOrDefault(Opt::CpuAbiList), ',', &cpu_abilist_);
821
822  if (runtime_options.GetOrDefault(Opt::Interpret)) {
823    GetInstrumentation()->ForceInterpretOnly();
824  }
825
826  zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
827
828  XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
829  heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
830                       runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
831                       runtime_options.GetOrDefault(Opt::HeapMinFree),
832                       runtime_options.GetOrDefault(Opt::HeapMaxFree),
833                       runtime_options.GetOrDefault(Opt::HeapTargetUtilization),
834                       runtime_options.GetOrDefault(Opt::ForegroundHeapGrowthMultiplier),
835                       runtime_options.GetOrDefault(Opt::MemoryMaximumSize),
836                       runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
837                       runtime_options.GetOrDefault(Opt::Image),
838                       runtime_options.GetOrDefault(Opt::ImageInstructionSet),
839                       xgc_option.collector_type_,
840                       runtime_options.GetOrDefault(Opt::BackgroundGc),
841                       runtime_options.GetOrDefault(Opt::LargeObjectSpace),
842                       runtime_options.GetOrDefault(Opt::LargeObjectThreshold),
843                       runtime_options.GetOrDefault(Opt::ParallelGCThreads),
844                       runtime_options.GetOrDefault(Opt::ConcGCThreads),
845                       runtime_options.Exists(Opt::LowMemoryMode),
846                       runtime_options.GetOrDefault(Opt::LongPauseLogThreshold),
847                       runtime_options.GetOrDefault(Opt::LongGCLogThreshold),
848                       runtime_options.Exists(Opt::IgnoreMaxFootprint),
849                       runtime_options.GetOrDefault(Opt::UseTLAB),
850                       xgc_option.verify_pre_gc_heap_,
851                       xgc_option.verify_pre_sweeping_heap_,
852                       xgc_option.verify_post_gc_heap_,
853                       xgc_option.verify_pre_gc_rosalloc_,
854                       xgc_option.verify_pre_sweeping_rosalloc_,
855                       xgc_option.verify_post_gc_rosalloc_,
856                       runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
857                       runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
858
859  if (heap_->GetImageSpace() == nullptr && !allow_dex_file_fallback_) {
860    LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
861    return false;
862  }
863
864  dump_gc_performance_on_shutdown_ = runtime_options.Exists(Opt::DumpGCPerformanceOnShutdown);
865
866  if (runtime_options.Exists(Opt::JdwpOptions)) {
867    Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
868  }
869
870  jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
871  if (IsAotCompiler()) {
872    // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
873    // this case.
874    // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
875    // null and we don't create the jit.
876    jit_options_->SetUseJIT(false);
877  }
878
879  // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
880  // can't be trimmed as easily.
881  const bool use_malloc = IsAotCompiler();
882  arena_pool_.reset(new ArenaPool(use_malloc, false));
883  if (IsCompiler() && Is64BitInstructionSet(kRuntimeISA)) {
884    // 4gb, no malloc. Explanation in header.
885    low_4gb_arena_pool_.reset(new ArenaPool(false, true));
886    linear_alloc_.reset(new LinearAlloc(low_4gb_arena_pool_.get()));
887  } else {
888    linear_alloc_.reset(new LinearAlloc(arena_pool_.get()));
889  }
890
891  BlockSignals();
892  InitPlatformSignalHandlers();
893
894  // Change the implicit checks flags based on runtime architecture.
895  switch (kRuntimeISA) {
896    case kArm:
897    case kThumb2:
898    case kX86:
899    case kArm64:
900    case kX86_64:
901      implicit_null_checks_ = true;
902      // Installing stack protection does not play well with valgrind.
903      implicit_so_checks_ = (RUNNING_ON_VALGRIND == 0);
904      break;
905    default:
906      // Keep the defaults.
907      break;
908  }
909
910  // Always initialize the signal chain so that any calls to sigaction get
911  // correctly routed to the next in the chain regardless of whether we
912  // have claimed the signal or not.
913  InitializeSignalChain();
914
915  if (implicit_null_checks_ || implicit_so_checks_ || implicit_suspend_checks_) {
916    fault_manager.Init();
917
918    // These need to be in a specific order.  The null pointer check handler must be
919    // after the suspend check and stack overflow check handlers.
920    //
921    // Note: the instances attach themselves to the fault manager and are handled by it. The manager
922    //       will delete the instance on Shutdown().
923    if (implicit_suspend_checks_) {
924      new SuspensionHandler(&fault_manager);
925    }
926
927    if (implicit_so_checks_) {
928      new StackOverflowHandler(&fault_manager);
929    }
930
931    if (implicit_null_checks_) {
932      new NullPointerHandler(&fault_manager);
933    }
934
935    if (kEnableJavaStackTraceHandler) {
936      new JavaStackTraceHandler(&fault_manager);
937    }
938  }
939
940  java_vm_ = new JavaVMExt(this, runtime_options);
941
942  Thread::Startup();
943
944  // ClassLinker needs an attached thread, but we can't fully attach a thread without creating
945  // objects. We can't supply a thread group yet; it will be fixed later. Since we are the main
946  // thread, we do not get a java peer.
947  Thread* self = Thread::Attach("main", false, nullptr, false);
948  CHECK(self != nullptr);
949  CHECK_EQ(self->GetThreadId(), ThreadList::kMainThreadId);
950
951  // Set us to runnable so tools using a runtime can allocate and GC by default
952  self->TransitionFromSuspendedToRunnable();
953
954  // Now we're attached, we can take the heap locks and validate the heap.
955  GetHeap()->EnableObjectValidation();
956
957  CHECK_GE(GetHeap()->GetContinuousSpaces().size(), 1U);
958  class_linker_ = new ClassLinker(intern_table_);
959  if (GetHeap()->HasImageSpace()) {
960    class_linker_->InitFromImage();
961    if (kIsDebugBuild) {
962      GetHeap()->GetImageSpace()->VerifyImageAllocations();
963    }
964    if (boot_class_path_string_.empty()) {
965      // The bootclasspath is not explicitly specified: construct it from the loaded dex files.
966      const std::vector<const DexFile*>& boot_class_path = GetClassLinker()->GetBootClassPath();
967      std::vector<std::string> dex_locations;
968      dex_locations.reserve(boot_class_path.size());
969      for (const DexFile* dex_file : boot_class_path) {
970        dex_locations.push_back(dex_file->GetLocation());
971      }
972      boot_class_path_string_ = Join(dex_locations, ':');
973    }
974  } else {
975    std::vector<std::string> dex_filenames;
976    Split(boot_class_path_string_, ':', &dex_filenames);
977
978    std::vector<std::string> dex_locations;
979    if (!runtime_options.Exists(Opt::BootClassPathLocations)) {
980      dex_locations = dex_filenames;
981    } else {
982      dex_locations = runtime_options.GetOrDefault(Opt::BootClassPathLocations);
983      CHECK_EQ(dex_filenames.size(), dex_locations.size());
984    }
985
986    std::vector<std::unique_ptr<const DexFile>> boot_class_path;
987    OpenDexFiles(dex_filenames,
988                 dex_locations,
989                 runtime_options.GetOrDefault(Opt::Image),
990                 &boot_class_path);
991    instruction_set_ = runtime_options.GetOrDefault(Opt::ImageInstructionSet);
992    class_linker_->InitWithoutImage(std::move(boot_class_path));
993
994    // TODO: Should we move the following to InitWithoutImage?
995    SetInstructionSet(instruction_set_);
996    for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
997      Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
998      if (!HasCalleeSaveMethod(type)) {
999        SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
1000      }
1001    }
1002  }
1003
1004  CHECK(class_linker_ != nullptr);
1005
1006  // Initialize the special sentinel_ value early.
1007  sentinel_ = GcRoot<mirror::Object>(class_linker_->AllocObject(self));
1008  CHECK(sentinel_.Read() != nullptr);
1009
1010  verifier::MethodVerifier::Init();
1011
1012  if (runtime_options.Exists(Opt::MethodTrace)) {
1013    trace_config_.reset(new TraceConfig());
1014    trace_config_->trace_file = runtime_options.ReleaseOrDefault(Opt::MethodTraceFile);
1015    trace_config_->trace_file_size = runtime_options.ReleaseOrDefault(Opt::MethodTraceFileSize);
1016    trace_config_->trace_mode = Trace::TraceMode::kMethodTracing;
1017    trace_config_->trace_output_mode = runtime_options.Exists(Opt::MethodTraceStreaming) ?
1018        Trace::TraceOutputMode::kStreaming :
1019        Trace::TraceOutputMode::kFile;
1020  }
1021
1022  {
1023    auto&& profiler_options = runtime_options.ReleaseOrDefault(Opt::ProfilerOpts);
1024    profile_output_filename_ = profiler_options.output_file_name_;
1025
1026    // TODO: Don't do this, just change ProfilerOptions to include the output file name?
1027    ProfilerOptions other_options(
1028        profiler_options.enabled_,
1029        profiler_options.period_s_,
1030        profiler_options.duration_s_,
1031        profiler_options.interval_us_,
1032        profiler_options.backoff_coefficient_,
1033        profiler_options.start_immediately_,
1034        profiler_options.top_k_threshold_,
1035        profiler_options.top_k_change_threshold_,
1036        profiler_options.profile_type_,
1037        profiler_options.max_stack_depth_);
1038
1039    profiler_options_ = other_options;
1040  }
1041
1042  // TODO: move this to just be a Trace::Start argument
1043  Trace::SetDefaultClockSource(runtime_options.GetOrDefault(Opt::ProfileClock));
1044
1045  // Pre-allocate an OutOfMemoryError for the double-OOME case.
1046  self->ThrowNewException("Ljava/lang/OutOfMemoryError;",
1047                          "OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
1048                          "no stack trace available");
1049  pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException());
1050  self->ClearException();
1051
1052  // Pre-allocate a NoClassDefFoundError for the common case of failing to find a system class
1053  // ahead of checking the application's class loader.
1054  self->ThrowNewException("Ljava/lang/NoClassDefFoundError;",
1055                          "Class not found using the boot class loader; no stack trace available");
1056  pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException());
1057  self->ClearException();
1058
1059  // Look for a native bridge.
1060  //
1061  // The intended flow here is, in the case of a running system:
1062  //
1063  // Runtime::Init() (zygote):
1064  //   LoadNativeBridge -> dlopen from cmd line parameter.
1065  //  |
1066  //  V
1067  // Runtime::Start() (zygote):
1068  //   No-op wrt native bridge.
1069  //  |
1070  //  | start app
1071  //  V
1072  // DidForkFromZygote(action)
1073  //   action = kUnload -> dlclose native bridge.
1074  //   action = kInitialize -> initialize library
1075  //
1076  //
1077  // The intended flow here is, in the case of a simple dalvikvm call:
1078  //
1079  // Runtime::Init():
1080  //   LoadNativeBridge -> dlopen from cmd line parameter.
1081  //  |
1082  //  V
1083  // Runtime::Start():
1084  //   DidForkFromZygote(kInitialize) -> try to initialize any native bridge given.
1085  //   No-op wrt native bridge.
1086  {
1087    std::string native_bridge_file_name = runtime_options.ReleaseOrDefault(Opt::NativeBridge);
1088    is_native_bridge_loaded_ = LoadNativeBridge(native_bridge_file_name);
1089  }
1090
1091  VLOG(startup) << "Runtime::Init exiting";
1092
1093  return true;
1094}
1095
1096void Runtime::InitNativeMethods() {
1097  VLOG(startup) << "Runtime::InitNativeMethods entering";
1098  Thread* self = Thread::Current();
1099  JNIEnv* env = self->GetJniEnv();
1100
1101  // Must be in the kNative state for calling native methods (JNI_OnLoad code).
1102  CHECK_EQ(self->GetState(), kNative);
1103
1104  // First set up JniConstants, which is used by both the runtime's built-in native
1105  // methods and libcore.
1106  JniConstants::init(env);
1107  WellKnownClasses::Init(env);
1108
1109  // Then set up the native methods provided by the runtime itself.
1110  RegisterRuntimeNativeMethods(env);
1111
1112  // Then set up libcore, which is just a regular JNI library with a regular JNI_OnLoad.
1113  // Most JNI libraries can just use System.loadLibrary, but libcore can't because it's
1114  // the library that implements System.loadLibrary!
1115  {
1116    std::string reason;
1117    if (!java_vm_->LoadNativeLibrary(env, "libjavacore.so", nullptr, &reason)) {
1118      LOG(FATAL) << "LoadNativeLibrary failed for \"libjavacore.so\": " << reason;
1119    }
1120  }
1121
1122  // Initialize well known classes that may invoke runtime native methods.
1123  WellKnownClasses::LateInit(env);
1124
1125  VLOG(startup) << "Runtime::InitNativeMethods exiting";
1126}
1127
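// Caches global references to the main and system ThreadGroup instances so they can be handed to
// threads attached later.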
1128void Runtime::InitThreadGroups(Thread* self) {
1129  JNIEnvExt* env = self->GetJniEnv();
1130  ScopedJniEnvLocalRefState env_state(env);
1131  main_thread_group_ =
1132      env->NewGlobalRef(env->GetStaticObjectField(
1133          WellKnownClasses::java_lang_ThreadGroup,
1134          WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup));
1135  CHECK(main_thread_group_ != nullptr || IsAotCompiler());
1136  system_thread_group_ =
1137      env->NewGlobalRef(env->GetStaticObjectField(
1138          WellKnownClasses::java_lang_ThreadGroup,
1139          WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup));
1140  CHECK(system_thread_group_ != nullptr || IsAotCompiler());
1141}
1142
1143jobject Runtime::GetMainThreadGroup() const {
1144  CHECK(main_thread_group_ != nullptr || IsAotCompiler());
1145  return main_thread_group_;
1146}
1147
1148jobject Runtime::GetSystemThreadGroup() const {
1149  CHECK(system_thread_group_ != nullptr || IsAotCompiler());
1150  return system_thread_group_;
1151}
1152
1153jobject Runtime::GetSystemClassLoader() const {
1154  CHECK(system_class_loader_ != nullptr || IsAotCompiler());
1155  return system_class_loader_;
1156}
1157
1158void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
1159  register_dalvik_system_DexFile(env);
1160  register_dalvik_system_VMDebug(env);
1161  register_dalvik_system_VMRuntime(env);
1162  register_dalvik_system_VMStack(env);
1163  register_dalvik_system_ZygoteHooks(env);
1164  register_java_lang_Class(env);
1165  register_java_lang_DexCache(env);
1166  register_java_lang_Object(env);
1167  register_java_lang_ref_FinalizerReference(env);
1168  register_java_lang_reflect_Array(env);
1169  register_java_lang_reflect_Constructor(env);
1170  register_java_lang_reflect_Field(env);
1171  register_java_lang_reflect_Method(env);
1172  register_java_lang_reflect_Proxy(env);
1173  register_java_lang_ref_Reference(env);
1174  register_java_lang_Runtime(env);
1175  register_java_lang_String(env);
1176  register_java_lang_StringFactory(env);
1177  register_java_lang_System(env);
1178  register_java_lang_Thread(env);
1179  register_java_lang_Throwable(env);
1180  register_java_lang_VMClassLoader(env);
1181  register_java_util_concurrent_atomic_AtomicLong(env);
1182  register_libcore_util_CharsetUtils(env);
1183  register_org_apache_harmony_dalvik_ddmc_DdmServer(env);
1184  register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(env);
1185  register_sun_misc_Unsafe(env);
1186}
1187
1188void Runtime::DumpForSigQuit(std::ostream& os) {
1189  GetClassLinker()->DumpForSigQuit(os);
1190  GetInternTable()->DumpForSigQuit(os);
1191  GetJavaVM()->DumpForSigQuit(os);
1192  GetHeap()->DumpForSigQuit(os);
1193  TrackedAllocators::Dump(os);
1194  os << "\n";
1195
1196  thread_list_->DumpForSigQuit(os);
1197  BaseMutex::DumpAll(os);
1198}
1199
1200void Runtime::DumpLockHolders(std::ostream& os) {
1201  uint64_t mutator_lock_owner = Locks::mutator_lock_->GetExclusiveOwnerTid();
1202  pid_t thread_list_lock_owner = GetThreadList()->GetLockOwner();
1203  pid_t classes_lock_owner = GetClassLinker()->GetClassesLockOwner();
1204  pid_t dex_lock_owner = GetClassLinker()->GetDexLockOwner();
1205  if ((thread_list_lock_owner | classes_lock_owner | dex_lock_owner) != 0) {
1206    os << "Mutator lock exclusive owner tid: " << mutator_lock_owner << "\n"
1207       << "ThreadList lock owner tid: " << thread_list_lock_owner << "\n"
1208       << "ClassLinker classes lock owner tid: " << classes_lock_owner << "\n"
1209       << "ClassLinker dex lock owner tid: " << dex_lock_owner << "\n";
1210  }
1211}
1212
1213void Runtime::SetStatsEnabled(bool new_state) {
1214  Thread* self = Thread::Current();
1215  MutexLock mu(self, *Locks::instrument_entrypoints_lock_);
1216  if (new_state) {
1217    GetStats()->Clear(~0);
1218    // TODO: wouldn't it make more sense to clear _all_ threads' stats?
1219    self->GetStats()->Clear(~0);
1220    if (stats_enabled_ != new_state) {
1221      GetInstrumentation()->InstrumentQuickAllocEntryPointsLocked();
1222    }
1223  } else if (stats_enabled_ != new_state) {
1224    GetInstrumentation()->UninstrumentQuickAllocEntryPointsLocked();
1225  }
1226  stats_enabled_ = new_state;
1227}
1228
1229void Runtime::ResetStats(int kinds) {
1230  GetStats()->Clear(kinds & 0xffff);
1231  // TODO: wouldn't it make more sense to clear _all_ threads' stats?
1232  Thread::Current()->GetStats()->Clear(kinds >> 16);
1233}
1234
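// Statistic kinds in the low 16 bits refer to the global counters; kinds shifted into the upper
// 16 bits select the calling thread's counters.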
1235int32_t Runtime::GetStat(int kind) {
1236  RuntimeStats* stats;
1237  if (kind < (1<<16)) {
1238    stats = GetStats();
1239  } else {
1240    stats = Thread::Current()->GetStats();
1241    kind >>= 16;
1242  }
1243  switch (kind) {
1244  case KIND_ALLOCATED_OBJECTS:
1245    return stats->allocated_objects;
1246  case KIND_ALLOCATED_BYTES:
1247    return stats->allocated_bytes;
1248  case KIND_FREED_OBJECTS:
1249    return stats->freed_objects;
1250  case KIND_FREED_BYTES:
1251    return stats->freed_bytes;
1252  case KIND_GC_INVOCATIONS:
1253    return stats->gc_for_alloc_count;
1254  case KIND_CLASS_INIT_COUNT:
1255    return stats->class_init_count;
1256  case KIND_CLASS_INIT_TIME:
1257    // Convert ns to us, reduce to 32 bits.
1258    return static_cast<int>(stats->class_init_time_ns / 1000);
1259  case KIND_EXT_ALLOCATED_OBJECTS:
1260  case KIND_EXT_ALLOCATED_BYTES:
1261  case KIND_EXT_FREED_OBJECTS:
1262  case KIND_EXT_FREED_BYTES:
1263    return 0;  // backward compatibility
1264  default:
1265    LOG(FATAL) << "Unknown statistic " << kind;
1266    return -1;  // unreachable
1267  }
1268}
1269
1270void Runtime::BlockSignals() {
1271  SignalSet signals;
1272  signals.Add(SIGPIPE);
1273  // SIGQUIT is used to dump the runtime's state (including stack traces).
1274  signals.Add(SIGQUIT);
1275  // SIGUSR1 is used to initiate a GC.
1276  signals.Add(SIGUSR1);
1277  signals.Block();
1278}
1279
1280bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
1281                                  bool create_peer) {
1282  return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != nullptr;
1283}
1284
1285void Runtime::DetachCurrentThread() {
1286  Thread* self = Thread::Current();
1287  if (self == nullptr) {
1288    LOG(FATAL) << "attempting to detach thread that is not attached";
1289  }
1290  if (self->HasManagedStack()) {
1291    LOG(FATAL) << *Thread::Current() << " attempting to detach while still running code";
1292  }
1293  thread_list_->Unregister(self);
1294}
1295
1296mirror::Throwable* Runtime::GetPreAllocatedOutOfMemoryError() {
1297  mirror::Throwable* oome = pre_allocated_OutOfMemoryError_.Read();
1298  if (oome == nullptr) {
1299    LOG(ERROR) << "Failed to return pre-allocated OOME";
1300  }
1301  return oome;
1302}
1303
1304mirror::Throwable* Runtime::GetPreAllocatedNoClassDefFoundError() {
1305  mirror::Throwable* ncdfe = pre_allocated_NoClassDefFoundError_.Read();
1306  if (ncdfe == nullptr) {
1307    LOG(ERROR) << "Failed to return pre-allocated NoClassDefFoundError";
1308  }
1309  return ncdfe;
1310}
1311
1312void Runtime::VisitConstantRoots(RootVisitor* visitor) {
1313  // Visit the classes held as static in mirror classes; these can be visited concurrently and only
1314  // need to be visited once per GC since they never change.
1315  mirror::Class::VisitRoots(visitor);
1316  mirror::Constructor::VisitRoots(visitor);
1317  mirror::Reference::VisitRoots(visitor);
1318  mirror::Method::VisitRoots(visitor);
1319  mirror::StackTraceElement::VisitRoots(visitor);
1320  mirror::String::VisitRoots(visitor);
1321  mirror::Throwable::VisitRoots(visitor);
1322  mirror::Field::VisitRoots(visitor);
1323  // Visit all the primitive array types classes.
1324  mirror::PrimitiveArray<uint8_t>::VisitRoots(visitor);   // BooleanArray
1325  mirror::PrimitiveArray<int8_t>::VisitRoots(visitor);    // ByteArray
1326  mirror::PrimitiveArray<uint16_t>::VisitRoots(visitor);  // CharArray
1327  mirror::PrimitiveArray<double>::VisitRoots(visitor);    // DoubleArray
1328  mirror::PrimitiveArray<float>::VisitRoots(visitor);     // FloatArray
1329  mirror::PrimitiveArray<int32_t>::VisitRoots(visitor);   // IntArray
1330  mirror::PrimitiveArray<int64_t>::VisitRoots(visitor);   // LongArray
1331  mirror::PrimitiveArray<int16_t>::VisitRoots(visitor);   // ShortArray
1332  // Visiting the roots of these ArtMethods is not currently required since all the GcRoots are
1333  // null.
1334  BufferedRootVisitor<16> buffered_visitor(visitor, RootInfo(kRootVMInternal));
1335  if (HasResolutionMethod()) {
1336    resolution_method_->VisitRoots(buffered_visitor);
1337  }
1338  if (HasImtConflictMethod()) {
1339    imt_conflict_method_->VisitRoots(buffered_visitor);
1340  }
1341  if (imt_unimplemented_method_ != nullptr) {
1342    imt_unimplemented_method_->VisitRoots(buffered_visitor);
1343  }
1344  for (size_t i = 0; i < kLastCalleeSaveType; ++i) {
1345    auto* m = reinterpret_cast<ArtMethod*>(callee_save_methods_[i]);
1346    if (m != nullptr) {
1347      m->VisitRoots(buffered_visitor);
1348    }
1349  }
1350}
1351
1352void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
1353  intern_table_->VisitRoots(visitor, flags);
1354  class_linker_->VisitRoots(visitor, flags);
1355  if ((flags & kVisitRootFlagNewRoots) == 0) {
1356    // Guaranteed to have no new roots in the constant roots.
1357    VisitConstantRoots(visitor);
1358  }
1359}
1360
1361void Runtime::VisitTransactionRoots(RootVisitor* visitor) {
1362  if (preinitialization_transaction_ != nullptr) {
1363    preinitialization_transaction_->VisitRoots(visitor);
1364  }
1365}
1366
1367void Runtime::VisitNonThreadRoots(RootVisitor* visitor) {
1368  java_vm_->VisitRoots(visitor);
1369  sentinel_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
1370  pre_allocated_OutOfMemoryError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
1371  pre_allocated_NoClassDefFoundError_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
1372  verifier::MethodVerifier::VisitStaticRoots(visitor);
1373  VisitTransactionRoots(visitor);
1374}
1375
1376void Runtime::VisitNonConcurrentRoots(RootVisitor* visitor) {
1377  thread_list_->VisitRoots(visitor);
1378  VisitNonThreadRoots(visitor);
1379}
1380
1381void Runtime::VisitThreadRoots(RootVisitor* visitor) {
1382  thread_list_->VisitRoots(visitor);
1383}
1384
1385size_t Runtime::FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
1386                                gc::collector::GarbageCollector* collector) {
1387  return thread_list_->FlipThreadRoots(thread_flip_visitor, flip_callback, collector);
1388}
1389
1390void Runtime::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
1391  VisitNonConcurrentRoots(visitor);
1392  VisitConcurrentRoots(visitor, flags);
1393}
1394
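// Visits the roots of every image space; image objects are not expected to move, hence the CHECK
// that the visitor left each root unchanged.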
1395void Runtime::VisitImageRoots(RootVisitor* visitor) {
1396  for (auto* space : GetHeap()->GetContinuousSpaces()) {
1397    if (space->IsImageSpace()) {
1398      auto* image_space = space->AsImageSpace();
1399      const auto& image_header = image_space->GetImageHeader();
1400      for (size_t i = 0; i < ImageHeader::kImageRootsMax; ++i) {
1401        auto* obj = image_header.GetImageRoot(static_cast<ImageHeader::ImageRoot>(i));
1402        if (obj != nullptr) {
1403          auto* after_obj = obj;
1404          visitor->VisitRoot(&after_obj, RootInfo(kRootStickyClass));
1405          CHECK_EQ(after_obj, obj);
1406        }
1407      }
1408    }
1409  }
1410}
1411
1412ArtMethod* Runtime::CreateImtConflictMethod() {
1413  auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod();
1414  // When compiling, the code pointer will get set later when the image is loaded.
1415  if (IsAotCompiler()) {
1416    size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
1417    method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
1418  } else {
1419    method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub());
1420  }
1421  return method;
1422}
1423
1424void Runtime::SetImtConflictMethod(ArtMethod* method) {
1425  CHECK(method != nullptr);
1426  CHECK(method->IsRuntimeMethod());
1427  imt_conflict_method_ = method;
1428}
1429
ArtMethod* Runtime::CreateResolutionMethod() {
  auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod();
  // When compiling, the code pointer will get set later when the image is loaded.
  if (IsAotCompiler()) {
    size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
    method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
  } else {
    method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
  }
  return method;
}

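// Creates a callee-save runtime method. These methods only describe stack frames and are never
// executed directly, so the entry point is always left null.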
ArtMethod* Runtime::CreateCalleeSaveMethod() {
  auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod();
  size_t pointer_size = GetInstructionSetPointerSize(instruction_set_);
  method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size);
  DCHECK_NE(instruction_set_, kNone);
  DCHECK(method->IsRuntimeMethod());
  return method;
}

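// System weaks (monitors, interned strings and JNI weak globals) must not be created while the
// garbage collector is sweeping them. DisallowNewSystemWeaks and AllowNewSystemWeaks bracket
// that window, and EnsureNewSystemWeaksDisallowed checks that no thread is still adding one.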
void Runtime::DisallowNewSystemWeaks() {
  monitor_list_->DisallowNewMonitors();
  intern_table_->DisallowNewInterns();
  java_vm_->DisallowNewWeakGlobals();
}

void Runtime::AllowNewSystemWeaks() {
  monitor_list_->AllowNewMonitors();
  intern_table_->AllowNewInterns();
  java_vm_->AllowNewWeakGlobals();
}

void Runtime::EnsureNewSystemWeaksDisallowed() {
  // Lock and unlock the system weak locks once to ensure that no
  // threads are still in the middle of adding new system weaks.
  monitor_list_->EnsureNewMonitorsDisallowed();
  intern_table_->EnsureNewInternsDisallowed();
  java_vm_->EnsureNewWeakGlobalsDisallowed();
}

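// Records the instruction set the runtime executes on and fills in the quick frame layout
// (frame size and core/FP spill masks) for each callee-save method type of that architecture.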
void Runtime::SetInstructionSet(InstructionSet instruction_set) {
  instruction_set_ = instruction_set;
  if ((instruction_set_ == kThumb2) || (instruction_set_ == kArm)) {
    for (int i = 0; i != kLastCalleeSaveType; ++i) {
      CalleeSaveType type = static_cast<CalleeSaveType>(i);
      callee_save_method_frame_infos_[i] = arm::ArmCalleeSaveMethodFrameInfo(type);
    }
  } else if (instruction_set_ == kMips) {
    for (int i = 0; i != kLastCalleeSaveType; ++i) {
      CalleeSaveType type = static_cast<CalleeSaveType>(i);
      callee_save_method_frame_infos_[i] = mips::MipsCalleeSaveMethodFrameInfo(type);
    }
  } else if (instruction_set_ == kMips64) {
    for (int i = 0; i != kLastCalleeSaveType; ++i) {
      CalleeSaveType type = static_cast<CalleeSaveType>(i);
      callee_save_method_frame_infos_[i] = mips64::Mips64CalleeSaveMethodFrameInfo(type);
    }
  } else if (instruction_set_ == kX86) {
    for (int i = 0; i != kLastCalleeSaveType; ++i) {
      CalleeSaveType type = static_cast<CalleeSaveType>(i);
      callee_save_method_frame_infos_[i] = x86::X86CalleeSaveMethodFrameInfo(type);
    }
  } else if (instruction_set_ == kX86_64) {
    for (int i = 0; i != kLastCalleeSaveType; ++i) {
      CalleeSaveType type = static_cast<CalleeSaveType>(i);
      callee_save_method_frame_infos_[i] = x86_64::X86_64CalleeSaveMethodFrameInfo(type);
    }
  } else if (instruction_set_ == kArm64) {
    for (int i = 0; i != kLastCalleeSaveType; ++i) {
      CalleeSaveType type = static_cast<CalleeSaveType>(i);
      callee_save_method_frame_infos_[i] = arm64::Arm64CalleeSaveMethodFrameInfo(type);
    }
  } else {
    UNIMPLEMENTED(FATAL) << instruction_set_;
  }
}

void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) {
  DCHECK_LT(static_cast<int>(type), static_cast<int>(kLastCalleeSaveType));
  CHECK(method != nullptr);
  callee_save_methods_[type] = reinterpret_cast<uintptr_t>(method);
}

void Runtime::StartProfiler(const char* profile_output_filename) {
  profile_output_filename_ = profile_output_filename;
  profiler_started_ =
      BackgroundMethodSamplingProfiler::Start(profile_output_filename_, profiler_options_);
}

// Transaction support.
void Runtime::EnterTransactionMode(Transaction* transaction) {
  DCHECK(IsAotCompiler());
  DCHECK(transaction != nullptr);
  DCHECK(!IsActiveTransaction());
  preinitialization_transaction_ = transaction;
}

void Runtime::ExitTransactionMode() {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_ = nullptr;
}

bool Runtime::IsTransactionAborted() const {
  if (!IsActiveTransaction()) {
    return false;
  } else {
    DCHECK(IsAotCompiler());
    return preinitialization_transaction_->IsAborted();
  }
}

void Runtime::AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message) {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  // Throwing the abort error may trigger the initialization of its class. If we marked the
  // transaction as aborted before doing so, that initialization could be reported as a false
  // alarm. Throwing first and only then marking the transaction aborted avoids this.
  preinitialization_transaction_->ThrowAbortError(self, &abort_message);
  preinitialization_transaction_->Abort(abort_message);
}

void Runtime::ThrowTransactionAbortError(Thread* self) {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  // Passing nullptr means we rethrow an exception with the earlier transaction abort message.
  preinitialization_transaction_->ThrowAbortError(self, nullptr);
}

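// The Record* methods below forward field, array and intern-table mutations to the active
// transaction so they can be rolled back if class initialization is aborted. They are only
// called by the AOT compiler while a transaction is open, as the DCHECKs assert.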
void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
                                      uint8_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile);
}

void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
                                   int8_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteFieldByte(obj, field_offset, value, is_volatile);
}

void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
                                   uint16_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteFieldChar(obj, field_offset, value, is_volatile);
}

void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
                                    int16_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteFieldShort(obj, field_offset, value, is_volatile);
}

void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset,
                                 uint32_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteField32(obj, field_offset, value, is_volatile);
}

void Runtime::RecordWriteField64(mirror::Object* obj, MemberOffset field_offset,
                                 uint64_t value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteField64(obj, field_offset, value, is_volatile);
}

void Runtime::RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
                                        mirror::Object* value, bool is_volatile) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteFieldReference(obj, field_offset, value, is_volatile);
}

void Runtime::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWriteArray(array, index, value);
}

void Runtime::RecordStrongStringInsertion(mirror::String* s) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordStrongStringInsertion(s);
}

void Runtime::RecordWeakStringInsertion(mirror::String* s) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWeakStringInsertion(s);
}

void Runtime::RecordStrongStringRemoval(mirror::String* s) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordStrongStringRemoval(s);
}

void Runtime::RecordWeakStringRemoval(mirror::String* s) const {
  DCHECK(IsAotCompiler());
  DCHECK(IsActiveTransaction());
  preinitialization_transaction_->RecordWeakStringRemoval(s);
}

void Runtime::SetFaultMessage(const std::string& message) {
  MutexLock mu(Thread::Current(), fault_message_lock_);
  fault_message_ = message;
}

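// Appends arguments describing the current runtime to a dex2oat command line: an interpret-only
// compiler filter when the runtime itself is interpreting or JIT compiling, plus the instruction
// set and instruction-set features the generated code must target.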
void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* argv)
    const {
  if (GetInstrumentation()->InterpretOnly() || UseJit()) {
    argv->push_back("--compiler-filter=interpret-only");
  }

  // Make the dex2oat instruction set match that of the launching runtime. With multi-architecture
  // support, dex2oat may have been compiled for a different instruction set than the one
  // currently executing.
  std::string instruction_set("--instruction-set=");
  instruction_set += GetInstructionSetString(kRuntimeISA);
  argv->push_back(instruction_set);

  std::unique_ptr<const InstructionSetFeatures> features(InstructionSetFeatures::FromCppDefines());
  std::string feature_string("--instruction-set-features=");
  feature_string += features->GetFeatureString();
  argv->push_back(feature_string);
}

void Runtime::UpdateProfilerState(int state) {
  VLOG(profiler) << "Profiler state updated to " << state;
}

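// Creates the JIT for a non-AOT runtime unless instrumentation forces interpret-only execution.
// Failure to create the JIT is not fatal: the runtime logs a warning and continues without it.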
void Runtime::CreateJit() {
  CHECK(!IsAotCompiler());
  if (GetInstrumentation()->IsForcedInterpretOnly()) {
    // Don't create JIT if forced interpret only.
    return;
  }
  std::string error_msg;
  jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg));
  if (jit_.get() != nullptr) {
    compiler_callbacks_ = jit_->GetCompilerCallbacks();
    jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold());
    jit_->CreateThreadPool();
  } else {
    LOG(WARNING) << "Failed to create JIT " << error_msg;
  }
}

bool Runtime::CanRelocate() const {
  return !IsAotCompiler() || compiler_callbacks_->IsRelocationPossible();
}

bool Runtime::IsCompilingBootImage() const {
  return IsCompiler() && compiler_callbacks_->IsBootImage();
}

void Runtime::SetResolutionMethod(ArtMethod* method) {
  CHECK(method != nullptr);
  CHECK(method->IsRuntimeMethod()) << method;
  resolution_method_ = method;
}

void Runtime::SetImtUnimplementedMethod(ArtMethod* method) {
  CHECK(method != nullptr);
  CHECK(method->IsRuntimeMethod());
  imt_unimplemented_method_ = method;
}

}  // namespace art
