runtime.h revision d482e73fe26cb9161511a80e3db39e08b9808ab6
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_RUNTIME_H_
18#define ART_RUNTIME_RUNTIME_H_
19
#include <jni.h>
#include <stdio.h>

#include <atomic>
#include <iosfwd>
#include <list>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "deoptimization_kind.h"
#include "dex_file_types.h"
#include "experimental_flags.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"
42
43namespace art {
44
45namespace gc {
46  class AbstractSystemWeakHolder;
47  class Heap;
48}  // namespace gc
49
50namespace jit {
51  class Jit;
52  class JitOptions;
53}  // namespace jit
54
55namespace mirror {
56  class Array;
57  class ClassLoader;
58  class DexCache;
59  template<class T> class ObjectArray;
60  template<class T> class PrimitiveArray;
61  typedef PrimitiveArray<int8_t> ByteArray;
62  class String;
63  class Throwable;
64}  // namespace mirror
65namespace ti {
66  class Agent;
67}  // namespace ti
68namespace verifier {
69  class MethodVerifier;
70  enum class VerifyMode : int8_t;
71}  // namespace verifier
72class ArenaPool;
73class ArtMethod;
74class ClassHierarchyAnalysis;
75class ClassLinker;
76class CompilerCallbacks;
77class DexFile;
78class InternTable;
79class IsMarkedVisitor;
80class JavaVMExt;
81class LinearAlloc;
82class MemMap;
83class MonitorList;
84class MonitorPool;
85class NullPointerHandler;
86class OatFileManager;
87class Plugin;
88struct RuntimeArgumentMap;
89class RuntimeCallbacks;
90class SignalCatcher;
91class StackOverflowHandler;
92class SuspensionHandler;
93class ThreadList;
94class Trace;
95struct TraceConfig;
96class Transaction;
97
98typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
99
// Global state of a single ART runtime instance. At most one Runtime exists per
// process; it is published in the static `instance_` field and retrieved through
// Runtime::Current().
class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  // Dex2oat is only considered enabled when image dex2oat is also enabled.
  bool IsDex2OatEnabled() const {
    return dex2oat_enabled_ && IsImageDex2OatEnabled();
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;
  std::string GetPatchoatExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  // Returns the active runtime, or null if none has been created yet.
  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  // Invokes the exit hook (if installed via the JNI_CreateJavaVM hooks) with the given status.
  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);

  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  // The default stack size for managed threads created by the runtime.
  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryError() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  // Runtime version string reported to callers (e.g. through JNI GetVersion-style queries).
  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast for making blocking threads to respond to
  // checkpoint requests. It's false when we broadcast to unblock blocking threads after system weak
  // access is reenabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);

  // Visit all the roots. The `flags` bitmask selects which categories of roots are visited;
  // by default all roots are visited.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots, only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non thread roots, we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweep system weaks, the system weak is deleted if the visitor returns null. Otherwise, the
  // system weak is updated to be the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution.
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables();
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  // The kinds of callee-save method frames: which registers a runtime method spills to the stack.
  enum CalleeSaveType {
    kSaveAllCalleeSaves,  // All callee-save registers.
    kSaveRefsOnly,        // Only those callee-save registers that can hold references.
    kSaveRefsAndArgs,     // References (see above) and arguments (usually caller-save registers).
    kSaveEverything,      // All registers, including both callee-save and caller-save.
    kLastCalleeSaveType   // Value used for iteration
  };

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[type] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
    return callee_save_method_frame_infos_[type];
  }

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Byte offset of callee_save_methods_[type] within Runtime; used from assembly stubs.
  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);

  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  // Returns true if JIT compilations are enabled. GetJit() will be not null in this case.
  bool UseJitCompilation() const;

  void PreZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const {
    return preinitialization_transaction_ != nullptr;
  }
  void EnterTransactionMode(Transaction* transaction);
  void ExitTransactionMode();
  bool IsTransactionAborted() const;

  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Transaction write logging (one entry point per field/array/string kind), so a
  // transaction can be rolled back at compile time.
  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                          bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
  // with the unexpected_signal_lock_.
  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
    return fault_message_;
  }

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  // Explicit stack overflow checks are used exactly when implicit ones are disabled.
  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  bool IsDexFileFallbackEnabled() const {
    return allow_dex_file_fallback_;
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(int32_t version) {
    target_sdk_version_ = version;
  }

  int32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  // Create the JIT and instrumentation and code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetJavaDebuggable(bool value);

  // Deoptimize the boot image, called for Java debuggable apps.
  void DeoptimizeBootImage();

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pause.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }

  // Returns if the code can be deoptimized asynchronously. Code may be compiled with some
  // optimization that makes it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);

  ClassHierarchyAnalysis* GetClassHierarchyAnalysis() {
    return cha_;
  }

  NO_RETURN
  static void Aborter(const char* abort_message);

  void AttachAgent(const std::string& agent_arg);

  const std::list<ti::Agent>& GetAgents() const {
    return agents_;
  }

  RuntimeCallbacks* GetRuntimeCallbacks();

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  // Sums the per-kind deoptimization counters.
  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized, they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;  // "Forground" (sic) kept; external callers use it.
  static constexpr int kProfileBackground = 1;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kLastCalleeSaveType];
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // Unresolved method has the same behavior as the conflict method, it is used by the class linker
  // for differentiating between unfilled imt slots vs conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;
  QuickMethodFrameInfo callee_save_method_frame_infos_[kLastCalleeSaveType];

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool dex2oat_enabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::string patchoat_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  std::list<ti::Agent> agents_;
  std::vector<Plugin> plugins_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  std::unique_ptr<ArenaPool> jit_arena_pool_;
  std::unique_ptr<ArenaPool> arena_pool_;
  // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
  // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
  // since the field arrays are int arrays in this case.
  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;

  // Shared linear alloc for now.
  std::unique_ptr<LinearAlloc> linear_alloc_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;

  // If true, the runtime will connect to tombstoned via a socket to
  // request an open file descriptor to write its traces to.
  bool use_tombstoned_traces_;

  // Location to which traces must be written on SIGQUIT. Only used if
  // tombstoned_traces_ == false.
  std::string stack_trace_file_;

  std::unique_ptr<JavaVMExt> java_vm_;

  std::unique_ptr<jit::Jit> jit_;
  std::unique_ptr<jit::JitOptions> jit_options_;

  // Fault message, printed when we get a SIGSEGV.
  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::string fault_message_ GUARDED_BY(fault_message_lock_);

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // New flag added which tells us if the runtime has finished starting. If
  // this flag is set then the Daemon threads are created and the class loader
  // is created. This flag is needed for knowing if it's safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool is_running_on_memory_tool_;

  std::unique_ptr<TraceConfig> trace_config_;

  instrumentation::Instrumentation instrumentation_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transaction used for pre-initializing classes at compilation time.
  Transaction* preinitialization_transaction_;

  // If kNone, verification is disabled. kEnable by default.
  verifier::VerifyMode verify_;

  // If true, the runtime may use dex files directly with the interpreter if an oat file is not
  // available/usable.
  bool allow_dex_file_fallback_;

  // List of supported cpu abis.
  std::vector<std::string> cpu_abilist_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  int32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;       // NullPointer checks are implicit.
  bool implicit_so_checks_;         // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.

  // Whether or not the sig chain (and implicitly the fault handler) should be
  // disabled. Tools like dex2oat or patchoat don't need them. This enables
  // building a statically link version of dex2oat.
  bool no_sig_chain_;

  // Force the use of native bridge even if the app ISA matches the runtime ISA.
  bool force_native_bridge_;

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. The way it works is,
  // if standard dlopen fails to load native library associated with native activity, it calls to
  // the native bridge to load it and then gets the trampoline for the entry to native activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  // Whether we are running under native debugger.
  bool is_native_debuggable_;

  // Whether Java code needs to be debuggable.
  bool is_java_debuggable_;

  // The maximum number of failed boots we allow before pruning the dalvik cache
  // and trying again. This option is only inspected when we're running as a
  // zygote.
  uint32_t zygote_max_failed_boots_;

  // Enable experimental opcodes that aren't fully specified yet. The intent is to
  // eventually publish them as public-usable opcodes, but they aren't ready yet.
  //
  // Experimental opcodes should not be used by other production code.
  ExperimentalFlags experimental_flags_;

  // Contains the build fingerprint, if given as a parameter.
  std::string fingerprint_;

  // Oat file manager, keeps track of what oat files are open.
  OatFileManager* oat_file_manager_;

  // Whether or not we are on a low RAM device.
  bool is_low_memory_mode_;

  // Whether the application should run in safe mode, that is, interpreter only.
  bool safe_mode_;

  // Whether threads should dump their native stack on SIGQUIT.
  bool dump_native_stack_on_sig_quit_;

  // Whether the dalvik cache was pruned when initializing the runtime.
  bool pruned_dalvik_cache_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // Whether zygote code is in a section that should not start threads.
  bool zygote_no_threads_;

  // Saved environment.
  class EnvSnapshot {
   public:
    EnvSnapshot() = default;
    void TakeSnapshot();
    char** GetSnapshot() const;

   private:
    std::unique_ptr<char*[]> c_env_vector_;
    std::vector<std::unique_ptr<std::string>> name_value_pairs_;

    DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
  } env_snapshot_;

  // Generic system-weak holders.
  std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;

  ClassHierarchyAnalysis* cha_;

  std::unique_ptr<RuntimeCallbacks> callbacks_;

  // One counter per DeoptimizationKind, indexed by static_cast<size_t>(kind).
  std::atomic<uint32_t> deoptimization_counts_[
      static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];

  std::unique_ptr<MemMap> protected_fault_page_;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};
962std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
963
964}  // namespace art
965
966#endif  // ART_RUNTIME_RUNTIME_H_
967