runtime.h revision a5dc52c023be28850e4d7422655d96771cb472f5
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <memory>
#include <vector>

#include "arch/instruction_set.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jdwp_provider.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"

namespace art {

namespace gc {
class AbstractSystemWeakHolder;
class Heap;
}  // namespace gc

namespace hiddenapi {
enum class EnforcementPolicy;
}  // namespace hiddenapi

namespace jit {
class Jit;
class JitOptions;
}  // namespace jit

namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
class String;
class Throwable;
}  // namespace mirror
namespace ti {
class Agent;
class AgentSpec;
}  // namespace ti
namespace verifier {
class MethodVerifier;
enum class VerifyMode : int8_t;
}  // namespace verifier
class ArenaPool;
class ArtMethod;
enum class CalleeSaveType : uint32_t;
class ClassLinker;
class CompilerCallbacks;
class DexFile;
class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
class MemMap;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class Trace;
struct TraceConfig;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
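
  // Example: a minimal sketch of how an embedder might bring the runtime up. The option string
  // below is illustrative only; real callers populate RuntimeOptions from the command line or
  // the zygote, and check errors.
  //
  //   RuntimeOptions raw_options;
  //   raw_options.push_back(std::make_pair("-Xmx64m", nullptr));
  //   if (Runtime::Create(raw_options, /* ignore_unrecognized= */ false)) {
  //     Runtime::Current()->Start();  // May start threads and run managed code.
  //   }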

  // IsAotCompiler is true for compilers that don't have a running runtime; currently only dex2oat.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is true for any runtime that has a running compiler, either dex2oat or the JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsDex2OatEnabled() const {
    return dex2oat_enabled_ && IsImageDex2OatEnabled();
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;
  std::string GetPatchoatExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);
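
  // Example: most code should report unrecoverable errors via LOG(FATAL), which ends up in
  // Abort(); calling Abort() directly is rarely appropriate. The message text is illustrative.
  //
  //   LOG(FATAL) << "Unexpected runtime state";     // Preferred.
  //   Runtime::Abort("Unexpected runtime state");   // Low-level path used by LOG(FATAL).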

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);
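
  // Example: a sketch of attaching a native thread so it can call into managed code, then
  // detaching it again before it exits. Thread-group choice and error handling are elided,
  // and the thread name is illustrative.
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->AttachCurrentThread("MyNativeWorker", /* as_daemon= */ false,
  //                                runtime->GetMainThreadGroup(), /* create_peer= */ true);
  //   ... call into Java via JNI ...
  //   runtime->DetachCurrentThread();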

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);

  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::string& GetBootClassPathString() const {
    return boot_class_path_string_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryError() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast to make blocked threads respond to
  // checkpoint requests, and false when we broadcast to unblock blocked threads after system weak
  // access is re-enabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);

  // Visit all the roots. |flags| controls which roots are visited, e.g. whether only dirty roots
  // are visited and whether dirty roots are marked clean afterwards.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots; only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non-thread roots; we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweep system weaks: a system weak is deleted if the visitor returns null; otherwise it is
  // updated to the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution.
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime IMT conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables();
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
    return callee_save_method_frame_infos_[static_cast<size_t>(type)];
  }

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);
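
  // Example: a sketch of sampling runtime stats around a workload, assuming the stat-kind flags
  // declared in runtime_stats.h (KIND_ALLOCATED_OBJECTS is used here purely for illustration).
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->SetStatsEnabled(true);
  //   ... run the workload ...
  //   int32_t allocated = runtime->GetStat(KIND_ALLOCATED_OBJECTS);
  //   runtime->SetStatsEnabled(false);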

  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  // Returns true if JIT compilation is enabled. GetJit() will be non-null in this case.
  bool UseJitCompilation() const;

  void PreZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const;
  void EnterTransactionMode();
  void EnterTransactionMode(bool strict, mirror::Class* root);
  void ExitTransactionMode();
  void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
  // Transaction rollback and exiting the transaction are always done together; it's convenient to
  // do them in one function.
  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsTransactionAborted() const;
  const std::unique_ptr<Transaction>& GetTransaction() const;
  bool IsActiveStrictTransactionMode() const;

  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);
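
  // Example: a sketch of the transaction flow used when pre-initializing classes at compile time.
  // The surrounding compiler logic is elided; only the Runtime calls shown here are real.
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->EnterTransactionMode();
  //   ... attempt the class initializer under the transaction ...
  //   if (runtime->IsTransactionAborted()) {
  //     runtime->RollbackAndExitTransactionMode();  // Rollback and exit happen together.
  //   } else {
  //     runtime->ExitTransactionMode();
  //   }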

  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetFaultMessage(const std::string& message) REQUIRES(!fault_message_lock_);
  // Only read by the signal handler; NO_THREAD_SAFETY_ANALYSIS to prevent lock order violations
  // with the unexpected_signal_lock_.
  const std::string& GetFaultMessage() NO_THREAD_SAFETY_ANALYSIS {
    return fault_message_;
  }

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  void DisableVerifier();
  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    hidden_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const {
    return hidden_api_policy_;
  }

  void SetPendingHiddenApiWarning(bool value) {
    pending_hidden_api_warning_ = value;
  }

  bool HasPendingHiddenApiWarning() const {
    return pending_hidden_api_warning_;
  }

  void SetDedupeHiddenApiWarnings(bool value) {
    dedupe_hidden_api_warnings_ = value;
  }

  bool ShouldDedupeHiddenApiWarnings() {
    return dedupe_hidden_api_warnings_;
  }

  void AlwaysSetHiddenApiWarningFlag() {
    always_set_hidden_api_warning_flag_ = true;
  }

  bool ShouldAlwaysSetHiddenApiWarningFlag() const {
    return always_set_hidden_api_warning_flag_;
  }

  bool IsDexFileFallbackEnabled() const {
    return allow_dex_file_fallback_;
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(int32_t version) {
    target_sdk_version_ = version;
  }

  int32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  // Create the JIT, instrumentation, and code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetJavaDebuggable(bool value);

  // Deoptimize the boot image; called for Java-debuggable apps.
  void DeoptimizeBootImage();

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  bool AreAsyncExceptionsThrown() const {
    return async_exceptions_thrown_;
  }

  void SetAsyncExceptionsThrown() {
    async_exceptions_thrown_ = true;
  }

  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a normal LinearAlloc, or a low-4GB version if we are a 64-bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pauses.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }

  // Returns whether the code can be deoptimized asynchronously. Code may be compiled with some
  // optimization that makes it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }
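
  // Example: a sketch of handing the preserved environment to a child process so that values
  // such as LD_LIBRARY_PATH survive a later setenv(). The |argv| array is illustrative.
  //
  //   char** preserved_env = Runtime::Current()->GetEnvSnapshot();
  //   execve(argv[0], argv, preserved_env);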

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);

  void AttachAgent(JNIEnv* env,
                   const std::string& agent_arg,
                   jobject class_loader,
                   bool allow_non_debuggable_tooling = false);

  const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
    return agents_;
  }

  RuntimeCallbacks* GetRuntimeCallbacks();

  bool HasLoadedPlugins() const {
    return !plugins_.empty();
  }

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool MAdviseRandomAccess() const {
    return madvise_random_access_;
  }

  const std::string& GetJdwpOptions() {
    return jdwp_options_;
  }

  JdwpProvider GetJdwpProvider() const {
    return jdwp_provider_;
  }

  static constexpr int32_t kUnsetSdkVersion = 0u;

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized; they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackground = 1;

  static constexpr uint32_t kCalleeSaveSize = 6u;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kCalleeSaveSize];
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // The unresolved method has the same behavior as the conflict method; it is used by the class
  // linker to differentiate between unfilled IMT slots and conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;
  QuickMethodFrameInfo callee_save_method_frame_infos_[kCalleeSaveSize];

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool dex2oat_enabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::string patchoat_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;

  std::string boot_class_path_string_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  std::list<ti::AgentSpec> agent_specs_;
  std::list<std::unique_ptr<ti::Agent>> agents_;
  std::vector<Plugin> plugins_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  std::unique_ptr<ArenaPool> jit_arena_pool_;
  std::unique_ptr<ArenaPool> arena_pool_;
  // Special low-4GB pool for the compiler's linear alloc. We need ArtFields to be in the low 4GB
  // when compiling a 32-bit image with a 64-bit compiler, in case we resolve things in the image,
  // since the field arrays are int arrays in this case.
  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;

  // Shared linear alloc for now.
  std::unique_ptr<LinearAlloc> linear_alloc_;

  // The number of spins that are done before thread suspension is used to forcibly inflate the lock.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;

  // If true, the runtime will connect to tombstoned via a socket to
  // request an open file descriptor to write its traces to.
  bool use_tombstoned_traces_;

  // Location to which traces must be written on SIGQUIT. Only used if
  // use_tombstoned_traces_ == false.
  std::string stack_trace_file_;

  std::unique_ptr<JavaVMExt> java_vm_;

  std::unique_ptr<jit::Jit> jit_;
  std::unique_ptr<jit::JitOptions> jit_options_;

  // Fault message, printed when we get a SIGSEGV.
  Mutex fault_message_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::string fault_message_ GUARDED_BY(fault_message_lock_);

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shut down but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // Tells us whether the runtime has finished starting. If this flag is set, the daemon threads
  // and the class loader have been created. This flag is needed to know whether it's safe to
  // request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM.
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool is_running_on_memory_tool_;

  std::unique_ptr<TraceConfig> trace_config_;

  instrumentation::Instrumentation instrumentation_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transactions used for pre-initializing classes at compilation time.
  // To support nested transactions, we maintain a list containing all transactions. Transactions
  // are handled under a stack discipline. Because the GC needs to walk all transactions, we use a
  // list as the underlying data structure instead of an actual stack.
  std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;

  // If kNone, verification is disabled. kEnable by default.
  verifier::VerifyMode verify_;

  // If true, the runtime may use dex files directly with the interpreter if an oat file is not
  // available/usable.
  bool allow_dex_file_fallback_;

  // List of supported CPU ABIs.
  std::vector<std::string> cpu_abilist_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  int32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;       // NullPointer checks are implicit.
  bool implicit_so_checks_;         // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.

  // Whether or not the sig chain (and implicitly the fault handler) should be
  // disabled. Tools like dex2oat or patchoat don't need them. This enables
  // building a statically linked version of dex2oat.
  bool no_sig_chain_;

  // Force the use of native bridge even if the app ISA matches the runtime ISA.
  bool force_native_bridge_;

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. The way it works is:
  // if a standard dlopen fails to load the native library associated with a native activity, it
  // calls into the native bridge to load it and then gets the trampoline for the entry to the
  // native activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  // Whether we are running under a native debugger.
  bool is_native_debuggable_;

  // Whether or not any async exceptions have ever been thrown. This is used to speed up the
  // MterpShouldSwitchInterpreters function.
  bool async_exceptions_thrown_;

  // Whether Java code needs to be debuggable.
  bool is_java_debuggable_;

  // The maximum number of failed boots we allow before pruning the dalvik cache
  // and trying again. This option is only inspected when we're running as a
  // zygote.
  uint32_t zygote_max_failed_boots_;

  // Enable experimental opcodes that aren't fully specified yet. The intent is to
  // eventually publish them as public-usable opcodes, but they aren't ready yet.
  //
  // Experimental opcodes should not be used by other production code.
  ExperimentalFlags experimental_flags_;

  // Contains the build fingerprint, if given as a parameter.
  std::string fingerprint_;

  // Oat file manager; keeps track of what oat files are open.
  OatFileManager* oat_file_manager_;

  // Whether or not we are on a low RAM device.
  bool is_low_memory_mode_;

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool madvise_random_access_;

  // Whether the application should run in safe mode, that is, interpreter only.
  bool safe_mode_;

  // Whether access checks on hidden API should be performed.
  hiddenapi::EnforcementPolicy hidden_api_policy_;

  // Whether the application has used an API which is not restricted but we
  // should issue a warning about it.
  bool pending_hidden_api_warning_;

  // Do not warn about the same hidden API access violation twice.
  // This is only used for testing.
  bool dedupe_hidden_api_warnings_;

  // Hidden API can print warnings into the log and/or set a flag read by the
  // framework to show a UI warning. If this flag is set, always set the flag
  // when there is a warning. This is only used for testing.
  bool always_set_hidden_api_warning_flag_;

  // Whether threads should dump their native stack on SIGQUIT.
  bool dump_native_stack_on_sig_quit_;

  // Whether the dalvik cache was pruned when initializing the runtime.
  bool pruned_dalvik_cache_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // Whether zygote code is in a section that should not start threads.
  bool zygote_no_threads_;

  // The string containing the requested JDWP options.
  std::string jdwp_options_;

  // The JDWP provider we were configured with.
  JdwpProvider jdwp_provider_;

  // Saved environment.
  class EnvSnapshot {
   public:
    EnvSnapshot() = default;
    void TakeSnapshot();
    char** GetSnapshot() const;

   private:
    std::unique_ptr<char*[]> c_env_vector_;
    std::vector<std::unique_ptr<std::string>> name_value_pairs_;

    DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
  } env_snapshot_;

  // Generic system-weak holders.
  std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;

  std::unique_ptr<RuntimeCallbacks> callbacks_;

  std::atomic<uint32_t> deoptimization_counts_[
      static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];

  std::unique_ptr<MemMap> protected_fault_page_;

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_
