runtime.h revision 5a61bb7969347ffe8e0bf4f4dff841cc6c21ed85
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <memory>
#include <vector>

#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jdwp_provider.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"

namespace art {

namespace gc {
class AbstractSystemWeakHolder;
class Heap;
}  // namespace gc

namespace jit {
class Jit;
class JitOptions;
}  // namespace jit

namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
class String;
class Throwable;
}  // namespace mirror
namespace ti {
class Agent;
class AgentSpec;
}  // namespace ti
namespace verifier {
class MethodVerifier;
enum class VerifyMode : int8_t;
}  // namespace verifier
class ArenaPool;
class ArtMethod;
enum class CalleeSaveType : uint32_t;
class ClassLinker;
class CompilerCallbacks;
class DexFile;
class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
class MemMap;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class Trace;
struct TraceConfig;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

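  // Example (illustrative sketch only, not part of the declarations in this header): a host
  // tool embedding ART might create and start the runtime roughly like this; the option string
  // below is a hypothetical heap size setting:
  //
  //   RuntimeOptions raw_options;
  //   raw_options.emplace_back("-Xmx64m", nullptr);
  //   if (!Runtime::Create(raw_options, /*ignore_unrecognized=*/false)) {
  //     return;  // No Runtime instance was created.
  //   }
  //   Runtime* runtime = Runtime::Current();
  //   runtime->Start();
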
  // Returns true for an AOT compiler that has no running runtime; only dex2oat currently.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // Returns true for any runtime that has a running compiler, either dex2oat or the JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsDex2OatEnabled() const {
    return dex2oat_enabled_ && IsImageDex2OatEnabled();
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;
  std::string GetPatchoatExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);

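  // Example (illustrative sketch only): a native thread that needs to call into Java can attach
  // and later detach itself; the thread name below is a hypothetical placeholder:
  //
  //   Runtime* runtime = Runtime::Current();
  //   if (runtime->AttachCurrentThread("MyWorker", /*as_daemon=*/false,
  //                                    /*thread_group=*/nullptr, /*create_peer=*/true)) {
  //     // ... use the attached Thread / JNIEnv ...
  //     runtime->DetachCurrentThread();
  //   }
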
  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryError() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast to make blocked threads respond to
  // checkpoint requests. It is false when we broadcast to unblock blocked threads after system
  // weak access is re-enabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);

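  // Example (illustrative sketch only): a pause-based collection might gate system weak access
  // around its marking phase roughly like this:
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->DisallowNewSystemWeaks();
  //   // ... mark reachable objects, then sweep system weaks with an IsMarkedVisitor ...
  //   runtime->AllowNewSystemWeaks();
  //   runtime->BroadcastForNewSystemWeaks();  // Wake threads blocked on system weak access.
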
  // Visit all the roots. |flags| selects which roots are visited and how dirty (logged) roots
  // are handled.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots; only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots that we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non-thread roots; this can be done with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweep system weaks: a system weak is deleted if the visitor returns null; otherwise, it is
  // updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution.
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables();
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
    return callee_save_method_frame_infos_[static_cast<size_t>(type)];
  }

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);

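  // Example (illustrative sketch only): runtime statistics can be sampled around a workload;
  // |kind| below stands for one of the stat-kind constants defined in runtime_stats.h:
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->SetStatsEnabled(true);
  //   // ... run workload ...
  //   int32_t value = runtime->GetStat(kind);
  //   runtime->SetStatsEnabled(false);
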
  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  // Returns true if JIT compilation is enabled. GetJit() will not be null in this case.
  bool UseJitCompilation() const;

  void PreZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);

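  // Example (illustrative sketch only): the zygote fork sequence uses these hooks roughly as
  // follows; the JNIEnv, the fork call, and the ISA string are placeholders:
  //
  //   runtime->PreZygoteFork();
  //   pid_t pid = fork();
  //   if (pid == 0) {  // Child process.
  //     runtime->InitNonZygoteOrPostFork(env, /*is_system_server=*/false,
  //                                      NativeBridgeAction::kUnload, "arm64");
  //   }
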
  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const;
  void EnterTransactionMode();
  void EnterTransactionMode(bool strict, mirror::Class* root);
  void ExitTransactionMode();
  void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
  // Transaction rollback and exiting transaction mode are always done together, so it is
  // convenient to do both in one function.
  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsTransactionAborted() const;
  const std::unique_ptr<Transaction>& GetTransaction() const;
  bool IsActiveStrictTransactionMode() const;

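  // Example (illustrative sketch only): AOT class pre-initialization wraps work in a transaction
  // so that a failed class initializer can be undone:
  //
  //   runtime->EnterTransactionMode();
  //   // ... run class initializers under the transaction ...
  //   if (runtime->IsTransactionAborted()) {
  //     runtime->RollbackAndExitTransactionMode();
  //   } else {
  //     runtime->ExitTransactionMode();
  //   }
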
  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
  // Only read by the signal handler, NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
  // with the unexpected_signal_lock_.
  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
    return fault_message_;
  }

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  void DisableVerifier();
  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  void SetHiddenApiChecksEnabled(bool value) {
    do_hidden_api_checks_ = value;
  }

  bool AreHiddenApiChecksEnabled() const {
    return do_hidden_api_checks_;
  }

  bool IsDexFileFallbackEnabled() const {
    return allow_dex_file_fallback_;
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(int32_t version) {
    target_sdk_version_ = version;
  }

  int32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  // Create the JIT together with its instrumentation and code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetJavaDebuggable(bool value);

  // Deoptimize the boot image, called for Java debuggable apps.
  void DeoptimizeBootImage();

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  bool AreAsyncExceptionsThrown() const {
    return async_exceptions_thrown_;
  }

  void SetAsyncExceptionsThrown() {
    async_exceptions_thrown_ = true;
  }

  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a normal LinearAlloc, or a low-4GB version if we are a 64-bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pauses.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }

  // Returns whether the code can be deoptimized asynchronously. Code may be compiled with
  // optimizations that make it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);

  void AttachAgent(JNIEnv* env,
                   const std::string& agent_arg,
                   jobject class_loader,
                   bool allow_non_debuggable_tooling = false);

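  // Example (illustrative sketch only): an agent could be attached at runtime; the library name
  // and its argument string below are hypothetical:
  //
  //   runtime->AttachAgent(env, "libmyagent.so=some-args", /*class_loader=*/nullptr);
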
  const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
    return agents_;
  }

  RuntimeCallbacks* GetRuntimeCallbacks();

  bool HasLoadedPlugins() const {
    return !plugins_.empty();
  }

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool MAdviseRandomAccess() const {
    return madvise_random_access_;
  }

  const std::string& GetJdwpOptions() {
    return jdwp_options_;
  }

  JdwpProvider GetJdwpProvider() const {
    return jdwp_provider_;
  }

  static constexpr int32_t kUnsetSdkVersion = 0u;

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots, which must be visited with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized, so they
  // only need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackground = 1;

  static constexpr uint32_t kCalleeSaveSize = 6u;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kCalleeSaveSize];
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // The unresolved method has the same behavior as the conflict method; it is used by the class
  // linker to differentiate between unfilled IMT slots and conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;
  QuickMethodFrameInfo callee_save_method_frame_infos_[kCalleeSaveSize];

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool dex2oat_enabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::string patchoat_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  std::list<ti::AgentSpec> agent_specs_;
  std::list<std::unique_ptr<ti::Agent>> agents_;
  std::vector<Plugin> plugins_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  std::unique_ptr<ArenaPool> jit_arena_pool_;
  std::unique_ptr<ArenaPool> arena_pool_;
  // Special low-4GB pool for the compiler's linear alloc. We need ArtFields to be in the low 4GB
  // if we are compiling a 32-bit image on a 64-bit compiler, in case we resolve things in the
  // image, since the field arrays are int arrays in this case.
  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;

  // Shared linear alloc for now.
  std::unique_ptr<LinearAlloc> linear_alloc_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;

  // If true, the runtime will connect to tombstoned via a socket to
  // request an open file descriptor to write its traces to.
  bool use_tombstoned_traces_;

  // Location to which traces must be written on SIGQUIT. Only used if
  // use_tombstoned_traces_ == false.
  std::string stack_trace_file_;

  std::unique_ptr<JavaVMExt> java_vm_;

  std::unique_ptr<jit::Jit> jit_;
  std::unique_ptr<jit::JitOptions> jit_options_;

  // Fault message, printed when we get a SIGSEGV.
  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::string fault_message_ GUARDED_BY(fault_message_lock_);

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // Flag that tells us whether the runtime has finished starting. If set, the daemon threads
  // and the class loader have been created. This flag is needed to know whether it is safe to
  // request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool is_running_on_memory_tool_;

  std::unique_ptr<TraceConfig> trace_config_;

  instrumentation::Instrumentation instrumentation_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transactions used for pre-initializing classes at compilation time. Nested transactions are
  // supported and handled with a stack discipline; because the GC needs to iterate over all
  // transactions, we keep them in a list rather than an actual stack.
  std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;

  // If kNone, verification is disabled. kEnable by default.
  verifier::VerifyMode verify_;

  // If true, the runtime may use dex files directly with the interpreter if an oat file is not
  // available/usable.
  bool allow_dex_file_fallback_;

  // List of supported cpu abis.
  std::vector<std::string> cpu_abilist_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  int32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;       // NullPointer checks are implicit.
  bool implicit_so_checks_;         // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.

  // Whether or not the sig chain (and implicitly the fault handler) should be
  // disabled. Tools like dex2oat or patchoat don't need them. This enables
  // building a statically linked version of dex2oat.
  bool no_sig_chain_;

  // Force the use of native bridge even if the app ISA matches the runtime ISA.
  bool force_native_bridge_;

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. It works as
  // follows: if a standard dlopen fails to load the native library associated with a native
  // activity, the runtime calls into the native bridge to load it and then obtains the
  // trampoline for the entry into the native activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  // Whether we are running under a native debugger.
  bool is_native_debuggable_;

  // Whether or not any async exceptions have ever been thrown. This is used to speed up the
  // MterpShouldSwitchInterpreters function.
  bool async_exceptions_thrown_;

  // Whether Java code needs to be debuggable.
  bool is_java_debuggable_;

  // The maximum number of failed boots we allow before pruning the dalvik cache
  // and trying again. This option is only inspected when we're running as a
  // zygote.
  uint32_t zygote_max_failed_boots_;

  // Enable experimental opcodes that aren't fully specified yet. The intent is to
  // eventually publish them as public-usable opcodes, but they aren't ready yet.
  //
  // Experimental opcodes should not be used by other production code.
  ExperimentalFlags experimental_flags_;

  // Contains the build fingerprint, if given as a parameter.
  std::string fingerprint_;

  // Oat file manager, keeps track of what oat files are open.
  OatFileManager* oat_file_manager_;

  // Whether or not we are on a low RAM device.
  bool is_low_memory_mode_;

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool madvise_random_access_;

  // Whether the application should run in safe mode, that is, interpreter only.
  bool safe_mode_;

  // Whether access checks on hidden API should be performed.
  bool do_hidden_api_checks_;

  // Whether threads should dump their native stack on SIGQUIT.
  bool dump_native_stack_on_sig_quit_;

  // Whether the dalvik cache was pruned when initializing the runtime.
  bool pruned_dalvik_cache_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // Whether zygote code is in a section that should not start threads.
  bool zygote_no_threads_;

  // The string containing the requested JDWP options.
  std::string jdwp_options_;

  // The JDWP provider we were configured with.
  JdwpProvider jdwp_provider_;

  // Saved environment.
  class EnvSnapshot {
   public:
    EnvSnapshot() = default;
    void TakeSnapshot();
    char** GetSnapshot() const;

   private:
    std::unique_ptr<char*[]> c_env_vector_;
    std::vector<std::unique_ptr<std::string>> name_value_pairs_;

    DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
  } env_snapshot_;

  // Generic system-weak holders.
  std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;

  std::unique_ptr<RuntimeCallbacks> callbacks_;

  std::atomic<uint32_t> deoptimization_counts_[
      static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];

  std::unique_ptr<MemMap> protected_fault_page_;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_
